diff --git a/.changes/2.11.10.json b/.changes/2.11.10.json new file mode 100644 index 000000000000..50678182d3f0 --- /dev/null +++ b/.changes/2.11.10.json @@ -0,0 +1,31 @@ +{ + "version": "2.11.10", + "date": "2020-04-06", + "entries": [ + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime proxy phone sessions let you provide two users with a shared phone number to communicate via voice or text for up to 12 hours without revealing personal phone numbers. When users call or message the provided phone number, they are connected to the other party and their private phone numbers are replaced with the shared number in Caller ID." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "This release adds support for batch transcription jobs within Amazon Transcribe Medical." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "This release adds a new action, ListPlatformBranches, and updates two actions, ListPlatformVersions and DescribePlatformVersion, to support the concept of Elastic Beanstalk platform branches." + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "Documentation updates for AWS Identity and Access Management (IAM)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.11.json b/.changes/2.11.11.json new file mode 100644 index 000000000000..c95d042380f4 --- /dev/null +++ b/.changes/2.11.11.json @@ -0,0 +1,21 @@ +{ + "version": "2.11.11", + "date": "2020-04-07", + "entries": [ + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway." 
+ }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "API updates for CodeGuruReviewer" + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "description": "You can now send content from your MediaConnect flow to your virtual private cloud (VPC) without going over the public internet." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.12.json b/.changes/2.11.12.json new file mode 100644 index 000000000000..3a48687df2cd --- /dev/null +++ b/.changes/2.11.12.json @@ -0,0 +1,46 @@ +{ + "version": "2.11.12", + "date": "2020-04-08", + "entries": [ + { + "type": "feature", + "category": "AWS Migration Hub Config", + "description": "Adding ThrottlingException" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "CodeGuruProfiler adds support for resource based authorization to submit profile data." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "The OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary are now reserved for internal use. No data is returned for this parameter." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release provides the ability to include tags in EC2 event notifications." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release provides native support for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "feature: Chime: This release introduces the ability to tag Amazon Chime SDK meeting resources. You can use tags to organize and identify your resources for cost allocation." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK adds support for queue hopping. Jobs can now hop from their original queue to a specified alternate queue, based on the maximum wait time that you specify in the job settings." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.13.json b/.changes/2.11.13.json new file mode 100644 index 000000000000..112e1d2ce94d --- /dev/null +++ b/.changes/2.11.13.json @@ -0,0 +1,81 @@ +{ + "version": "2.11.13", + "date": "2020-04-16", + "entries": [ + { + "type": "feature", + "category": "Amazon Augmented AI Runtime", + "description": "This release updates Amazon Augmented AI ListHumanLoops and StartHumanLoop APIs." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert now allows you to specify your input captions frame rate for SCC captions sources." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "Added a new BatchUpdateFindings action, which allows customers to update selected information about their findings. Security Hub customers use BatchUpdateFindings to track their investigation into a finding. BatchUpdateFindings is intended to replace the UpdateFindings action, which is deprecated." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now supports adding AWS resource tags for placement groups and key pairs, at creation time. The CreatePlacementGroup API will now return placement group information when created successfully. The DeleteKeyPair API now supports deletion by resource ID." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "This release adds support for querying GetUserDefinedFunctions API without databaseName." 
+ }, + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of Avail Suppression." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds support for Amazon RDS Proxy with PostgreSQL compatibility." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release includes support for additional OS Versions within EC2 Image Builder." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Sample code for AWS Lambda operations" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Amazon SageMaker now supports running training jobs on ml.g4dn and ml.c5n instance types. Amazon SageMaker supports in \"IN\" operation for Search now." + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "description": "Mark a connection as unreusable if there was a 5xx server error so that a new request will establish a new connection." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "An update to the Snowball Edge Storage Optimized device has been launched. Like the previous version, it has 80 TB of capacity for data transfer. Now it has 40 vCPUs, 80 GiB, and a 1 TiB SATA SSD of memory for EC2 compatible compute. The 80 TB of capacity can also be used for EBS-like volumes for AMIs." + }, + { + "type": "feature", + "category": "AWS Migration Hub", + "description": "Adding ThrottlingException" + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "API update that allows users to customize event action payloads, and adds support for Amazon DynamoDB actions." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.11.14.json b/.changes/2.11.14.json new file mode 100644 index 000000000000..abeb535e8253 --- /dev/null +++ b/.changes/2.11.14.json @@ -0,0 +1,21 @@ +{ + "version": "2.11.14", + "date": "2020-04-17", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Documentation updates for opsworkscm" + }, + { + "type": "feature", + "category": "Amazon Fraud Detector", + "description": "Added support for a new rule engine execution mode. Customers will be able to configure their detector versions to evaluate all rules and return outcomes from all 'matched' rules in the GetPrediction API response. Added support for deleting Detectors (DeleteDetector) and Rule Versions (DeleteRuleVersion)." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.8.json b/.changes/2.11.8.json new file mode 100644 index 000000000000..edde3d1b2916 --- /dev/null +++ b/.changes/2.11.8.json @@ -0,0 +1,36 @@ +{ + "version": "2.11.8", + "date": "2020-04-02", + "entries": [ + { + "type": "feature", + "category": "Amazon GameLift", + "description": "Public preview of GameLift FleetIQ as a standalone feature. GameLift FleetIQ makes it possible to use low-cost Spot instances by limiting the chance of interruptions affecting game sessions. FleetIQ is a feature of the managed GameLift service, and can now be used with game hosting in EC2 Auto Scaling groups that you manage in your own account." + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Documentation updates for redshift" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports Automatic Input Failover. 
This feature provides resiliency upstream of the channel, before ingest starts." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for RDS: creating read replicas is now supported for SQL Server DB instances" + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "description": "Amazon CloudWatch Contributor Insights adds support for tags and tagging on resource creation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.11.9.json b/.changes/2.11.9.json new file mode 100644 index 000000000000..fda913d4e111 --- /dev/null +++ b/.changes/2.11.9.json @@ -0,0 +1,26 @@ +{ + "version": "2.11.9", + "date": "2020-04-03", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS RoboMaker", + "description": "Added support for limiting simulation unit usage, giving more predictable control over simulation cost" + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "Amazon Personalize: Add new response field \"score\" to each item returned by GetRecommendations and GetPersonalizedRanking (HRNN-based recipes only)" + }, + { + "type": "feature", + "category": "AWS S3", + "description": "Allow DefaultS3Presigner.Builder to take a custom S3Configuration" + } + ] +} \ No newline at end of file diff --git a/.changes/2.12.0.json b/.changes/2.12.0.json new file mode 100644 index 000000000000..a9af6d6cccc5 --- /dev/null +++ b/.changes/2.12.0.json @@ -0,0 +1,41 @@ +{ + "version": "2.12.0", + "date": "2020-04-20", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Cost Categories API is now General Available with new dimensions and operations support. You can map costs by account name, service, and charge type dimensions as well as use contains, starts with, and ends with operations. 
Cost Categories can also be used in RI and SP coverage reports." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "API update that allows users to add AWS Iot SiteWise actions while creating Detector Model in AWS Iot Events" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "description": "You can now export an OpenAPI 3.0 compliant API definition file for Amazon API Gateway HTTP APIs using the Export API." + }, + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client", + "description": "The Amazon DynamoDB Enhanced Client is now generally available and provides a natural and intuitive interface for developers to integrate their applications with Amazon DynamoDB by means of an adaptive API that will map inputs and results to and from Java objects modeled by the application, rather than requiring the developers to implement that transformation themselves." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Synthetics", + "description": "Introducing CloudWatch Synthetics. This is the first public release of CloudWatch Synthetics." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Added a new ConnectionType \"KAFKA\" and a ConnectionProperty \"KAFKA_BOOTSTRAP_SERVERS\" to support Kafka connection." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.0.json b/.changes/2.13.0.json new file mode 100644 index 000000000000..905b2c86afc3 --- /dev/null +++ b/.changes/2.13.0.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.0", + "date": "2020-04-21", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Cost Explorer Rightsizing Recommendations integrates with Compute Optimizer and begins offering across instance family rightsizing recommendations, adding to existing support for within instance family rightsizing recommendations." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependency version: Jackson 2.10.0 -> 2.10.3, Jackson-annotations 2.9.0 -> 2.10.0." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "description": "You can now programmatically transfer domains between AWS accounts without having to contact AWS Support" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "AWS GuardDuty now supports using AWS Organizations delegated administrators to create and manage GuardDuty master and member accounts. The feature also allows GuardDuty to be automatically enabled on associated organization accounts." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Bump minor version to '2.13.0-SNAPSHOT' because of upgrade of Jackson version." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR adds support for configuring a managed scaling policy for an Amazon EMR cluster. This enables automatic resizing of a cluster to optimize for job execution speed and reduced cluster cost." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.1.json b/.changes/2.13.1.json new file mode 100644 index 000000000000..2e9062c21aa5 --- /dev/null +++ b/.changes/2.13.1.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.1", + "date": "2020-04-22", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Add support for code review and recommendation feedback APIs." + }, + { + "type": "feature", + "category": "Firewall Management Service", + "description": "This release is to support AWS Firewall Manager policy with Organizational Unit scope." + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "This change adds a new field 'OptionalDeployment' to ServiceSoftwareOptions to indicate whether a service software update is optional or mandatory. If True, it indicates that the update is optional, and the service software is not automatically updated. If False, the service software is automatically updated after AutomatedUpdateDate." + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "Adding ServiceUnavailableException as one of the expected exceptions" + }, + { + "type": "feature", + "category": "Amazon Redshift", + "description": "Amazon Redshift support for usage limits" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.10.json b/.changes/2.13.10.json new file mode 100644 index 000000000000..5a3ec49b5bb3 --- /dev/null +++ b/.changes/2.13.10.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.10", + "date": "2020-05-05", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updating dependency version: Jackson 2.10.3 -> 2.10.4, and combine dependency Jackson-annotations with Jackson." 
+ }, + { + "type": "feature", + "category": "AWS Support", + "description": "Documentation updates for support" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "With this release, you can call ModifySubnetAttribute with two new parameters: MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool, to map a customerOwnedIpv4Pool to a subnet. You will also see these two new fields in the DescribeSubnets response. If your subnet has a customerOwnedIpv4Pool mapped, your network interface will get an auto assigned customerOwnedIpv4 address when placed onto an instance." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB", + "description": "Tweaked the javadocs for Get/Update, since it was previously wrongfully copied over from Delete and mentions the \"delete operation\"." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "AWS Systems Manager Parameter Store launches new data type to support aliases in EC2 APIs" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.11.json b/.changes/2.13.11.json new file mode 100644 index 000000000000..449231d9b4ec --- /dev/null +++ b/.changes/2.13.11.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.11", + "date": "2020-05-06", + "entries": [ + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "Added support for tagging resources in AWS CodeStar Connections" + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "New Batch Ontology APIs for ICD-10 and RxNorm will provide batch capability of linking the information extracted by Comprehend Medical to medical ontologies. The new ontology linking APIs make it easy to detect medications and medical conditions in unstructured clinical text and link them to RxNorm and ICD-10-CM codes respectively. 
This new feature can help you reduce the cost, time and effort of processing large amounts of unstructured medical text with high accuracy." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.12.json b/.changes/2.13.12.json new file mode 100644 index 000000000000..7b2c157f292c --- /dev/null +++ b/.changes/2.13.12.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.12", + "date": "2020-05-07", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "description": "Amazon CloudWatch Logs now offers the ability to interact with Logs Insights queries via the new PutQueryDefinition, DescribeQueryDefinitions, and DeleteQueryDefinition APIs." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Add COMMIT_MESSAGE enum for webhook filter types" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now adds warnings to identify issues when creating a launch template or launch template version." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Amazon Route 53 now supports the EU (Milan) Region (eu-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds support for the following options in instance public ports: Specify source IP addresses, specify ICMP protocol like PING, and enable/disable the Lightsail browser-based SSH and RDP clients' access to your instance." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "This Patch Manager release supports creating patch baselines for Oracle Linux and Debian" + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "The description of the AWS AppConfig GetConfiguration API action was amended to include important information about calling ClientConfigurationVersion when you configure clients to call GetConfiguration." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.13.json b/.changes/2.13.13.json new file mode 100644 index 000000000000..c073c66f9fe6 --- /dev/null +++ b/.changes/2.13.13.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.13", + "date": "2020-05-08", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds a new parameter (EnableInterContainerTrafficEncryption) to CreateProcessingJob API to allow for enabling inter-container traffic encryption on processing jobs." + }, + { + "type": "feature", + "category": "AWS Resource Groups Tagging API", + "description": "Documentation updates for resourcegroupstaggingapi" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "A helpful error message is now raised when an obviously-invalid region name is given to the SDK, instead of the previous NullPointerException. Fixes [#1642](https://github.com/aws/aws-sdk-java-v2/issues/1642)." 
+ }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Documentation updates for GuardDuty" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.14.json b/.changes/2.13.14.json new file mode 100644 index 000000000000..5f3cee6f8d07 --- /dev/null +++ b/.changes/2.13.14.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.14", + "date": "2020-05-11", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "M6g instances are our next-generation general purpose instances powered by AWS Graviton2 processors" + }, + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Add Bitbucket integration APIs" + }, + { + "type": "feature", + "category": "AWSKendraFrontendService", + "description": "Amazon Kendra is now generally available. As part of general availability, we are launching Metrics for query & storage utilization" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.15.json b/.changes/2.13.15.json new file mode 100644 index 000000000000..7d4bc3e225d2 --- /dev/null +++ b/.changes/2.13.15.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.15", + "date": "2020-05-12", + "entries": [ + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "Minor API fixes and updates to the documentation." 
+ }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "Documentation updates for iot-bifrost" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.16.json b/.changes/2.13.16.json new file mode 100644 index 000000000000..70cf75d0eb56 --- /dev/null +++ b/.changes/2.13.16.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.16", + "date": "2020-05-13", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix a race condition in `FileAsyncResponseTransformer` where the future fails to complete when onComplete event is dispatched on the same thread that executed request" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release introduces a new major version of the Amazon Macie API. You can use this version of the API to develop tools and applications that interact with the new Amazon Macie." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now supports auto-update of ElastiCache clusters after the \"recommended apply by date\" of service update has passed. ElastiCache will use your maintenance window to schedule the auto-update of applicable clusters. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.17.json b/.changes/2.13.17.json new file mode 100644 index 000000000000..3813e0448731 --- /dev/null +++ b/.changes/2.13.17.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.17", + "date": "2020-05-14", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Support event streams that are shared between two operations." + }, + { + "type": "feature", + "category": "Amazon RDS", + "description": "Add SourceRegion to CopyDBClusterSnapshot and CreateDBCluster operations. 
As with CopyDBSnapshot and CreateDBInstanceReadReplica, specifying this field will automatically populate the PresignedURL field with a valid value." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Amazon EC2 now supports adding AWS resource tags for associations between VPCs and local gateways, at creation time." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix generation for operations that share an output shape." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Expose the `extendedRequestId` from `SdkServiceException`, so it can be provided to support to investigate issues." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix unmarshalling of events when structure member name and shape name mismatch." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "This release adds a new parameter (SupportedOsVersions) to the Components API. This parameter lists the OS versions supported by a component." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.18.json b/.changes/2.13.18.json new file mode 100644 index 000000000000..467682fdac88 --- /dev/null +++ b/.changes/2.13.18.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.18", + "date": "2020-05-15", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Allow event structures to be used as operation outputs outside of streaming contexts." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Starting today, you can stop the execution of Glue workflows that are running. AWS Glue workflows are directed acyclic graphs (DAGs) of Glue triggers, crawlers and jobs. 
Using a workflow, you can design a complex multi-job extract, transform, and load (ETL) activity that AWS Glue can execute and track as a single entity." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release adds support for specifying an image manifest media type when pushing a manifest to Amazon ECR." + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "description": "API updates for STS" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "This release adds support for the following features: 1. DescribeType and ListTypeVersions APIs now output a field IsDefaultVersion, indicating if a version is the default version for its type; 2. Add StackRollbackComplete waiter feature to wait until stack status is UPDATE_ROLLBACK_COMPLETE; 3. Add paginators in DescribeAccountLimits, ListChangeSets, ListStackInstances, ListStackSetOperationResults, ListStackSetOperations, ListStackSets APIs." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix generation for services that contain operations with the same name as the service." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.19.json b/.changes/2.13.19.json new file mode 100644 index 000000000000..af01c484cf54 --- /dev/null +++ b/.changes/2.13.19.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.19", + "date": "2020-05-18", + "entries": [ + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "Documentation updates for Amazon Macie" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "description": "Documentation updates for dynamodb" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release adds support for specifying environment files to add environment variables to your containers." + }, + { + "type": "feature", + "category": "Amazon QLDB", + "description": "Amazon QLDB now supports Amazon Kinesis data streams. You can now emit QLDB journal data, via the new QLDB Streams feature, directly to Amazon Kinesis supporting event processing and analytics among related use cases." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime now supports redacting chat messages." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.2.json b/.changes/2.13.2.json new file mode 100644 index 000000000000..5ff3d4a2259b --- /dev/null +++ b/.changes/2.13.2.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.2", + "date": "2020-04-23", + "entries": [ + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "This release adds support for transfers over FTPS and FTP in and out of Amazon S3, which makes it easy to migrate File Transfer Protocol over SSL (FTPS) and FTP workloads to AWS, in addition to the existing support for Secure File Transfer Protocol (SFTP)." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Added AutomaticTapeCreation APIs" + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release of the Amazon Pinpoint API enhances support for sending campaigns through custom channels to locations such as AWS Lambda functions or web applications. 
Campaigns can now use CustomDeliveryConfiguration and CampaignCustomMessage to configure custom channel settings for a campaign." + }, + { + "type": "feature", + "category": "AWS Resource Access Manager", + "description": "AWS Resource Access Manager (RAM) provides a new ListResourceTypes action. This action lets you list the resource types that can be shared using AWS RAM." + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "Adds tagging support for PackagingGroups, PackagingConfigurations, and Assets" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds support for AWS Local Zones, including a new optional parameter AvailabilityZoneGroup for the DescribeOrderableDBInstanceOptions operation." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "This release supports Auto Scaling in Amazon Keyspaces for Apache Cassandra." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "description": "You can now deliver streaming data to an Amazon Elasticsearch Service domain in an Amazon VPC. You can now compress streaming data delivered to S3 using Hadoop-Snappy in addition to Gzip, Zip and Snappy formats." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.20.json b/.changes/2.13.20.json new file mode 100644 index 000000000000..92ec01704b9c --- /dev/null +++ b/.changes/2.13.20.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.20", + "date": "2020-05-19", + "entries": [ + { + "type": "feature", + "category": "AWS Health APIs and Notifications", + "description": "Feature: Health: AWS Health added a new field to differentiate Public events from Account-Specific events in the API request and response. Visit https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html to learn more." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Endpoint discovery is now enabled by default for future services that will require it. A new method 'endpointDiscoveryEnabled' has been added to client builders that support endpoint discovery allowing a true or false value to be set. 'enableEndpointDiscovery' has been deprecated on the client builders as it is now superseded by 'endpointDiscoveryEnabled'." + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "You can now receive Voice Connector call events through SNS or SQS." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds support for Federated Authentication via SAML-2.0 in AWS ClientVPN." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "Documentation updates for Amazon Transcribe." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.21.json b/.changes/2.13.21.json new file mode 100644 index 000000000000..aad080c3af8d --- /dev/null +++ b/.changes/2.13.21.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.21", + "date": "2020-05-20", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports the ability to ingest the content that is streaming from an AWS Elemental Link device: https://aws.amazon.com/medialive/features/link/. This release also adds support for SMPTE-2038 and input state waiters." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "For findings related to controls, the finding information now includes the reason behind the current status of the control. A new field for the findings original severity allows finding providers to use the severity values from the system they use to assign severity." 
+ }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "Amazon Chime enterprise account administrators can now set custom retention policies on chat data in the Amazon Chime application." + }, + { + "type": "feature", + "category": "AWS CodeDeploy", + "description": "Amazon ECS customers using application and network load balancers can use CodeDeploy BlueGreen hook to invoke a CloudFormation stack update. With this update you can view CloudFormation deployment and target details via existing APIs and use your stack Id to list or delete all deployments associated with the stack." + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "description": "This release adds support for vocabulary filtering in streaming with which you can filter unwanted words from the real-time transcription results. Visit https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html to learn more." + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "List APIs for all resources now contain additional information: when a resource was created, last updated, and its current version number." + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "description": "Documentation updates for Application Auto Scaling" + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "This release allows customers to enable or disable AWS Backup support for an AWS resource type. This release also includes new APIs, update-region-settings and describe-region-settings, which can be used to opt in to a specific resource type. For all current AWS Backup customers, the default settings enable support for EBS, EC2, StorageGateway, EFS, DDB and RDS resource types." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.22.json b/.changes/2.13.22.json new file mode 100644 index 000000000000..0d686bfd92d4 --- /dev/null +++ b/.changes/2.13.22.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.22", + "date": "2020-05-21", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed an issue where a service returning an unknown response event type would cause a failure." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Synthetics", + "description": "AWS CloudWatch Synthetics now supports configuration of allocated memory for a canary." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "CodeBuild adds support for tagging with report groups" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "From this release onwards ProvisionByoipCidr publicly supports IPv6. Updated ProvisionByoipCidr API to support tags for public IPv4 and IPv6 pools. Added NetworkBorderGroup to the DescribePublicIpv4Pools response." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "description": "Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.23.json b/.changes/2.13.23.json new file mode 100644 index 000000000000..76f107963e0a --- /dev/null +++ b/.changes/2.13.23.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.23", + "date": "2020-05-22", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "This release adds support for the standard deviation auto-computed aggregate and improved support for portal logo images in SiteWise." 
+ }, + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.24.json b/.changes/2.13.24.json new file mode 100644 index 000000000000..347220932f55 --- /dev/null +++ b/.changes/2.13.24.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.24", + "date": "2020-05-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "ebsOptimizedInfo, efaSupported and supportedVirtualizationTypes added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Allowing cron expression in the DLM policy creation schedule." + }, + { + "type": "feature", + "category": "Amazon Macie", + "description": "This is a documentation-only update to the Amazon Macie Classic API. This update corrects out-of-date references to the service name." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Amazon ElastiCache now allows you to use resource based policies to manage access to operations performed on ElastiCache resources. Also, Amazon ElastiCache now exposes ARN (Amazon Resource Names) for ElastiCache resources such as Cache Clusters and Parameter Groups. ARNs can be used to apply IAM policies to ElastiCache resources." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Add DataSetArns to QuickSight DescribeDashboard API response." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "The AWS Systems Manager GetOpsSummary API action now supports multiple OpsResultAttributes in the request. 
Currently, this feature only supports OpsResultAttributes with the following TypeNames: [AWS:EC2InstanceComputeOptimizer] or [AWS:EC2InstanceInformation, AWS:EC2InstanceComputeOptimizer]. These TypeNames can be used along with either or both of the following: [AWS:EC2InstanceRecommendation, AWS:RecommendationSource]" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.25.json b/.changes/2.13.25.json new file mode 100644 index 000000000000..73ef7b4eecbb --- /dev/null +++ b/.changes/2.13.25.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.25", + "date": "2020-05-27", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "description": "Check the `x-amz-content-range` header for `GetObject` responses when the `Content-Range` header is not returned by the service. Fixes [#1209](https://github.com/aws/aws-sdk-java-v2/issues/1209)." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "description": "This release added support for HTTP/2 ALPN preference lists for Network Load Balancers" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Documentation updates for GuardDuty" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.26.json b/.changes/2.13.26.json new file mode 100644 index 000000000000..cd65b21304ed --- /dev/null +++ b/.changes/2.13.26.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.26", + "date": "2020-05-28", + "entries": [ + { + "type": "feature", + "category": "Amazon QLDB Session", + "description": "Documentation updates for Amazon QLDB Session" + }, + { + "type": "feature", + "category": "Amazon WorkMail", + "description": "This release adds support for Amazon WorkMail organization-level retention policies." 
+ }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "description": "AWS Marketplace Catalog now supports accessing initial change payloads with DescribeChangeSet operation." + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "description": "New APIs for upgrading the Apache Kafka version of a cluster and to find out compatible upgrade paths" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.27.json b/.changes/2.13.27.json new file mode 100644 index 000000000000..f546be4f0bf5 --- /dev/null +++ b/.changes/2.13.27.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.27", + "date": "2020-06-01", + "entries": [ + { + "type": "feature", + "category": "AWS Key Management Service", + "description": "AWS Key Management Service (AWS KMS): If the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext APIs are called on a CMK in a custom key store (origin == AWS_CLOUDHSM), they return an UnsupportedOperationException. If a call to UpdateAlias causes a customer to exceed the Alias resource quota, the UpdateAlias API returns a LimitExceededException." + }, + { + "type": "feature", + "category": "Amazon Athena", + "description": "This release adds support for connecting Athena to your own Apache Hive Metastores in addition to the AWS Glue Data Catalog. For more information, please see https://docs.aws.amazon.com/athena/latest/ug/connect-to-data-source-hive.html" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR now supports encrypting log files with AWS Key Management Service (KMS) customer managed keys." + }, + { + "type": "feature", + "category": "AWS Maven Lambda Archetype", + "description": "Updated the `archetype-lambda` to generate SDK client that uses region from environment variable." 
+ }, + { + "type": "feature", + "category": "Amazon WorkLink", + "description": "Amazon WorkLink now supports resource tagging for fleets." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "We are releasing HumanTaskUiArn as a new parameter in CreateLabelingJob and RenderUiTemplate which can take an ARN for a system managed UI to render a task." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "New capabilities to update storage capacity and throughput capacity of your file systems, providing the flexibility to grow file storage and to scale up or down the available performance as needed to meet evolving storage needs over time." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.28.json b/.changes/2.13.28.json new file mode 100644 index 000000000000..5d785c1b88dd --- /dev/null +++ b/.changes/2.13.28.json @@ -0,0 +1,11 @@ +{ + "version": "2.13.28", + "date": "2020-06-02", + "entries": [ + { + "type": "feature", + "category": "Amazon GuardDuty", + "description": "Amazon GuardDuty findings now include S3 bucket details under the resource section if an S3 Bucket was one of the affected resources" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.29.json b/.changes/2.13.29.json new file mode 100644 index 000000000000..46f23d0e5331 --- /dev/null +++ b/.changes/2.13.29.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.29", + "date": "2020-06-03", + "entries": [ + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "GenerateServiceLastAccessedDetails will now return ActionLastAccessed details for certain S3 control plane actions" + }, + { + "type": "feature", + "category": "Amazon Elasticsearch Service", + "description": "Amazon Elasticsearch Service now offers support for cross-cluster search, enabling you to perform searches, aggregations, and visualizations across multiple Amazon Elasticsearch Service domains with a single query or from a 
single Kibana interface. New feature includes the ability to setup connection, required to perform cross-cluster search, between domains using an approval workflow." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "Adding databaseName in the response for GetUserDefinedFunctions() API." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for the encoding of VP8 or VP9 video in WebM container with Vorbis or Opus audio." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Direct Connect", + "description": "This release supports the virtual interface failover test, which allows you to verify that traffic routes over redundant virtual interfaces when you bring your primary virtual interface out of service." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "This release improves the Multi-AZ feature in ElastiCache by adding a separate flag and proper validations." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.3.json b/.changes/2.13.3.json new file mode 100644 index 000000000000..d2a6ac7ea768 --- /dev/null +++ b/.changes/2.13.3.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.3", + "date": "2020-04-24", + "entries": [ + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Enable 1hour frequency in the schedule creation for Data LifeCycle Manager." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "This release adds a new exception type to the AWS IoT SetV2LoggingLevel API." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Inference", + "description": "This feature allows customers to describe the accelerator types and offerings on any region where Elastic Inference is available." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fixed bean-style setter names on serializable builders to match bean-style getter names." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.30.json b/.changes/2.13.30.json new file mode 100644 index 000000000000..24c4bca4a469 --- /dev/null +++ b/.changes/2.13.30.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.30", + "date": "2020-06-04", + "entries": [ + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Documentation updates for meteringmarketplace" + }, + { + "type": "feature", + "category": "AWS Elemental MediaPackage VOD", + "description": "You can now restrict direct access to AWS Elemental MediaPackage by securing requests for VOD content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code." + }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "This release adds the BurstCapacityPercentage and BurstCapacityTime instance metrics, which allow you to track the burst capacity available to your instance." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New C5a instances, the latest generation of EC2's compute-optimized instances featuring AMD's 2nd Generation EPYC processors. C5a instances offer up to 96 vCPUs, 192 GiB of instance memory, 20 Gbps in Network bandwidth; New G4dn.metal bare metal instance with 8 NVIDIA T4 GPUs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "SSM State Manager support for executing an association only at specified CRON schedule after creating/updating an association." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.31.json b/.changes/2.13.31.json new file mode 100644 index 000000000000..5d5cabdf1b83 --- /dev/null +++ b/.changes/2.13.31.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.31", + "date": "2020-06-05", + "entries": [ + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for configurable origin connection attempts and origin connection timeout." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "This release adds support for DescribeProduct and DescribeProductAsAdmin by product name, DescribeProvisioningArtifact by product name or provisioning artifact name, returning launch paths as part of DescribeProduct output and adds maximum length for provisioning artifact name and provisioning artifact description." + }, + { + "type": "feature", + "category": "Amazon Personalize Runtime", + "description": "[Personalize] Adds ability to apply filter to real-time recommendations" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Amazon API Gateway now allows customers of REST APIs to skip trust chain validation for backend server certificates for HTTP and VPC Link Integration. This feature enables customers to configure their REST APIs to integrate with backends that are secured with certificates vended from private certificate authorities (CA) or certificates that are self-signed." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "description": "This release enables additional functionality for the Amazon Pinpoint journeys feature. 
With this release, you can send messages through additional channels, including SMS, push notifications, and custom channels." + }, + { + "type": "feature", + "category": "Amazon Personalize", + "description": "[Personalize] Adds ability to create and apply filters." + }, + { + "type": "feature", + "category": "Amazon SageMaker Runtime", + "description": "You can now specify the production variant to send the inference request to, when invoking a SageMaker Endpoint that is running two or more variants." + }, + { + "type": "feature", + "category": "AWS Elastic Beanstalk", + "description": "These API changes enable an IAM user to associate an operations role with an Elastic Beanstalk environment, so that the IAM user can call Elastic Beanstalk actions without having access to underlying downstream AWS services that these actions call." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.32.json b/.changes/2.13.32.json new file mode 100644 index 000000000000..c026b2237a9c --- /dev/null +++ b/.changes/2.13.32.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.32", + "date": "2020-06-08", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Added support for tagging Service and Namespace type resources in Cloud Map" + }, + { + "type": "feature", + "category": "AWS Shield", + "description": "This release adds the option for customers to identify a contact name and method that the DDoS Response Team can proactively engage when a Route 53 Health Check that is associated with a Shield protected resource fails." 
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Update javadoc annotation for AwsBasicCredentials" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.33.json b/.changes/2.13.33.json new file mode 100644 index 000000000000..655146bc5866 --- /dev/null +++ b/.changes/2.13.33.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.33", + "date": "2020-06-09", + "entries": [ + { + "type": "feature", + "category": "AWS Transfer Family", + "description": "This release updates the API so customers can test use of Source IP to allow, deny or limit access to data in their S3 buckets after integrating their identity provider." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.34.json b/.changes/2.13.34.json new file mode 100644 index 000000000000..b58df9e2886f --- /dev/null +++ b/.changes/2.13.34.json @@ -0,0 +1,51 @@ +{ + "version": "2.13.34", + "date": "2020-06-10", + "entries": [ + { + "type": "feature", + "category": "AWS Compute Optimizer", + "description": "Compute Optimizer supports exporting recommendations to Amazon S3." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release allows customers to choose from a list of predefined deployment strategies while starting deployments." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "New C6g instances powered by AWS Graviton2 processors and ideal for running advanced, compute-intensive workloads; New R6g instances powered by AWS Graviton2 processors and ideal for running memory-intensive workloads." 
+ }, + { + "type": "feature", + "category": "Amazon Lightsail", + "description": "Documentation updates for lightsail" + }, + { + "type": "feature", + "category": "AWS Shield", + "description": "Corrections to the supported format for contact phone numbers and to the description for the create subscription action." + }, + { + "type": "feature", + "category": "Amazon Data Lifecycle Manager", + "description": "Reducing the schedule name of DLM Lifecycle policy from 500 to 120 characters." + }, + { + "type": "feature", + "category": "CodeArtifact", + "description": "Added support for AWS CodeArtifact." + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This release of the Amazon Macie API removes support for the ArchiveFindings and UnarchiveFindings operations. This release also adds UNKNOWN as an encryption type for S3 bucket metadata." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "description": "Service Catalog Documentation Update for Integration with AWS Organizations Delegated Administrator feature" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.35.json b/.changes/2.13.35.json new file mode 100644 index 000000000000..8b23113e363c --- /dev/null +++ b/.changes/2.13.35.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.35", + "date": "2020-06-11", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Avoid unnecessary copying in `AsyncRequestBody.fromBytes()`" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS IoT Data Plane", + "description": "As part of this release, we are introducing a new feature called named shadow, which extends the capability of AWS IoT Device Shadow to support multiple shadows for a single IoT device. 
With this release, customers can store different device state data into different shadows, and as a result access only the required state data when needed and reduce individual shadow size." + }, + { + "type": "feature", + "category": "Amazon Lex Model Building Service", + "description": "This change adds the built-in AMAZON.KendraSearchIntent that enables integration with Amazon Kendra." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "description": "This release adds support for deleting capacity providers." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "EC2 Image Builder now supports specifying a custom working directory for your build and test workflows. In addition, Image Builder now supports defining tags that are applied to ephemeral resources created by EC2 Image Builder as part of the image creation workflow." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.36.json b/.changes/2.13.36.json new file mode 100644 index 000000000000..199f1a1ed4da --- /dev/null +++ b/.changes/2.13.36.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.36", + "date": "2020-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "The following parameters now return the organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets: the OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "You can now choose to crawl the entire table or just a sample of records in DynamoDB when using AWS Glue crawlers. Additionally, you can also specify a scanning rate for crawling DynamoDB tables." 
+ }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2 DynamoDB Enhanced Client", + "description": "Added ClientRequestToken in class TransactWriteItemsEnhancedRequest." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Display EndpointType in DescribeGatewayInformation" + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.37.json b/.changes/2.13.37.json new file mode 100644 index 000000000000..283abad01548 --- /dev/null +++ b/.changes/2.13.37.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.37", + "date": "2020-06-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Chime", + "description": "feature: Chime: This release introduces the ability to create an AWS Chime SDK meeting with attendees." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Adding support for optional tags in CreateBusinessReportSchedule, CreateProfile and CreateSkillGroup APIs" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Updated all AuthParameters to be sensitive." + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "Added support for job executions rollout configuration, job abort configuration, and job executions timeout configuration for AWS IoT Over-the-Air (OTA) Update Feature." + }, + { + "type": "feature", + "category": "Amazon AppConfig", + "description": "This release adds a hosted configuration source provider. Customers can now store their application configurations directly in AppConfig, without the need for an external configuration source." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.38.json b/.changes/2.13.38.json new file mode 100644 index 000000000000..04497c4057d3 --- /dev/null +++ b/.changes/2.13.38.json @@ -0,0 +1,41 @@ +{ + "version": "2.13.38", + "date": "2020-06-16", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Introducing instance refresh, a feature that helps you update all instances in an Auto Scaling group in a rolling fashion (for example, to apply a new AMI or instance type). You can control the pace of the refresh by defining the percentage of the group that must remain running/healthy during the replacement process and the time for new instances to warm up between replacements." + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "description": "This release fixes a bug in the AWS Data Exchange Python and NodeJS SDKs. The 'KmsKeyArn' field in the create-job API was configured to be required instead of optional. We updated this field to be optional in this release." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Documentation updates for CloudFront" + }, + { + "type": "feature", + "category": "Amazon Polly", + "description": "Amazon Polly adds new US English child voice - Kevin. Kevin is available as Neural voice only." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Adds support for using Amazon Elastic File System (persistent storage) with AWS Lambda. This enables customers to share data across function invocations, read large reference data files, and write function output to a persistent and shared store." + }, + { + "type": "feature", + "category": "Amazon QLDB", + "description": "Documentation updates for Amazon QLDB" + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Fix an NPE in `OptionalAttributeConverter` that can happen if the `nul()` property of the `AttributeValue` is `null`." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.39.json b/.changes/2.13.39.json new file mode 100644 index 000000000000..0da636d54ca1 --- /dev/null +++ b/.changes/2.13.39.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.39", + "date": "2020-06-17", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "nvmeSupport added to DescribeInstanceTypes API" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "Adds support for route and virtual node listener timeouts." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Add PriorRequestNotComplete exception to AssociateVPCWithHostedZone API" + }, + { + "type": "feature", + "category": "Amazon Macie 2", + "description": "This is a documentation-only update to the Amazon Macie API. This update contains miscellaneous editorial improvements to various API descriptions." + }, + { + "type": "feature", + "category": "Amazon Import/Export Snowball", + "description": "AWS Snowcone is a portable, rugged and secure device for edge computing and data transfer. You can use Snowcone to collect, process, and move data to AWS, either offline by shipping the device to AWS or online by using AWS DataSync. With 2 CPUs and 4 GB RAM of compute and 8 TB of storage, Snowcone can run edge computing workloads and store data securely. Snowcone's small size (8.94\" x 5.85\" x 3.25\" / 227 mm x 148.6 mm x 82.65 mm) allows you to set it next to machinery in a factory. Snowcone weighs about 4.5 lbs. (2 kg), so you can carry one in a backpack, use it with battery-based operation, and use the Wi-Fi interface to gather sensor data. Snowcone supports a file interface with NFS support." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.4.json b/.changes/2.13.4.json new file mode 100644 index 000000000000..9a1fc0862c0a --- /dev/null +++ b/.changes/2.13.4.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.4", + "date": "2020-04-27", + "entries": [ + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "Adding minimum replication engine version for describe-endpoint-types api." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Various performance improvements." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "Change to the input, ResourceSpec, changing EnvironmentArn to SageMakerImageArn. This affects the following preview APIs: CreateDomain, DescribeDomain, UpdateDomain, CreateUserProfile, DescribeUserProfile, UpdateUserProfile, CreateApp and DescribeApp." + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "description": "This release introduces AWS Data Exchange support for configurable encryption parameters when exporting data sets to Amazon S3." + }, + { + "type": "feature", + "category": "Access Analyzer", + "description": "This release adds support for inclusion of S3 Access Point policies in IAM Access Analyzer evaluation of S3 bucket access. IAM Access Analyzer now reports findings for buckets shared through access points and identifies the access point that permits access." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.40.json b/.changes/2.13.40.json new file mode 100644 index 000000000000..6c93707308b9 --- /dev/null +++ b/.changes/2.13.40.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.40", + "date": "2020-06-18", + "entries": [ + { + "type": "feature", + "category": "AWS Support", + "description": "Documentation updates for support" + }, + { + "type": "feature", + "category": "AWSMarketplace Metering", + "description": "Documentation updates for meteringmarketplace" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Added a new ListHostedZonesByVPC API for customers to list all the private hosted zones that a specified VPC is associated with." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for NexGuard FileMarker SDK, which allows NexGuard partners to watermark proprietary content in mezzanine and OTT streaming contexts." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adding support for global write forwarding on secondary clusters in an Aurora global database." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression before running the maintenance window." + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "description": "You can now configure Amazon SES to send event notifications when the delivery of an email is delayed because of a temporary issue. For example, you can receive a notification if the recipient's inbox is full, or if there's a temporary problem with the receiving email server." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.41.json b/.changes/2.13.41.json new file mode 100644 index 000000000000..b9fe63843fa2 --- /dev/null +++ b/.changes/2.13.41.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.41", + "date": "2020-06-19", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Adds support to tag elastic-gpu on the RunInstances api" + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports Input Prepare schedule actions. This feature improves existing input switching by allowing users to prepare an input prior to switching to it." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS OpsWorks CM", + "description": "Documentation updates for AWS OpsWorks CM." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.42.json b/.changes/2.13.42.json new file mode 100644 index 000000000000..822c23f0ec8e --- /dev/null +++ b/.changes/2.13.42.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.42", + "date": "2020-06-22", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "This release adds Tag On Create feature support for the ImportImage, ImportSnapshot, ExportImage and CreateInstanceExportTask APIs." + }, + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "description": "AWS SQS adds pagination support for ListQueues and ListDeadLetterSourceQueues APIs" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Added paginators for various APIs." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Adding support for MaximumCoreCapacityUnits parameter for EMR Managed Scaling. It allows users to control how many units/nodes are added to the CORE group/fleet. Remaining units/nodes are added to the TASK groups/fleet in the cluster." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "description": "This update adds the ability to detect black frames, end credits, shots, and color bars in stored videos" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.43.json b/.changes/2.13.43.json new file mode 100644 index 000000000000..efc5877aa6c9 --- /dev/null +++ b/.changes/2.13.43.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.43", + "date": "2020-06-23", + "entries": [ + { + "type": "feature", + "category": "AWS MediaTailor", + "description": "AWS Elemental MediaTailor SDK now allows configuration of Bumper." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "Added a new error message to support the requirement for a Business License on AWS accounts in China to create an organization." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.44.json b/.changes/2.13.44.json new file mode 100644 index 000000000000..bb9816351c6d --- /dev/null +++ b/.changes/2.13.44.json @@ -0,0 +1,56 @@ +{ + "version": "2.13.44", + "date": "2020-06-24", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "This release adds support for a new backup policy type for AWS Organizations." + }, + { + "type": "feature", + "category": "AWS CodeCommit", + "description": "This release introduces support for reactions to CodeCommit comments. 
Users will be able to select from a pre-defined list of emojis to express their reaction to any comments." + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "description": "Documentation updates for iam" + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "This release of AWS Amplify Console introduces support for automatically creating custom subdomains for branches based on user-defined glob patterns, as well as automatically cleaning up Amplify branches when their corresponding git branches are deleted." + }, + { + "type": "feature", + "category": "AWS Backup", + "description": "Customers can now manage and monitor their backups in a policied manner across their AWS accounts, via an integration between AWS Backup and AWS Organizations" + }, + { + "type": "feature", + "category": "Amazon Honeycode", + "description": "Introducing Amazon Honeycode - a fully managed service that allows you to quickly build mobile and web apps for teams without programming." + }, + { + "type": "feature", + "category": "Amazon Elastic MapReduce", + "description": "Amazon EMR customers can now set allocation strategies for On-Demand and Spot instances in their EMR clusters with instance fleets. These allocation strategies use real-time capacity insights to provision clusters faster and make the most efficient use of available spare capacity to allocate Spot instances to reduce interruptions." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon FSx", + "description": "This release adds the capability to take highly-durable, incremental backups of your FSx for Lustre persistent file systems. This capability makes it easy to further protect your file system data and to meet business and regulatory compliance requirements." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.45.json b/.changes/2.13.45.json new file mode 100644 index 000000000000..5886730a8d09 --- /dev/null +++ b/.changes/2.13.45.json @@ -0,0 +1,26 @@ +{ + "version": "2.13.45", + "date": "2020-06-25", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added support for tag-on-create for Host Reservations in Dedicated Hosts. You can now specify tags when you create a Host Reservation for a Dedicated Host. For more information about tagging, see AWS Tagging Strategies." + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "This release adds new APIs to support column level statistics in AWS Glue Data Catalog" + }, + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "description": "Fixed a bug causing a NullPointerException to be thrown in the enhanced DeleteItem operation if a conditionExpression was given with null attributeNames or null attributeValues." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.46.json b/.changes/2.13.46.json new file mode 100644 index 000000000000..684303a46f36 --- /dev/null +++ b/.changes/2.13.46.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.46", + "date": "2020-06-26", + "entries": [ + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "description": "Don't require Authorization for InitiateAuth and RespondToAuthChallenge." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Added support for cross-region DataSource credentials copying." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "description": "This release contains miscellaneous API documentation updates for AWS DMS in response to several customer reported issues." 
+ }, + { + "type": "feature", + "category": "AWS CloudFormation", + "description": "ListStackInstances and DescribeStackInstance now return a new `StackInstanceStatus` object that contains `DetailedStatus` values: a disambiguation of the more generic `Status` value. ListStackInstances output can now be filtered on `DetailedStatus` using the new `Filters` parameter." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "The new 'ModelClientConfig' parameter being added for CreateTransformJob and DescribeTransformJob api actions enable customers to configure model invocation related parameters such as timeout and retry." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.47.json b/.changes/2.13.47.json new file mode 100644 index 000000000000..aa613631389c --- /dev/null +++ b/.changes/2.13.47.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.47", + "date": "2020-06-29", + "entries": [ + { + "type": "feature", + "category": "Auto Scaling", + "description": "Documentation updates for Amazon EC2 Auto Scaling." + }, + { + "type": "feature", + "category": "Amazon CodeGuru Profiler", + "description": "Amazon CodeGuru Profiler is now generally available. The Profiler helps developers to optimize their software, troubleshoot issues in production, and identify their most expensive lines of code. As part of general availability, we are launching: Profiling of AWS Lambda functions, Anomaly detection in CPU profiles, Color My Code on flame graphs, Expanding presence to 10 AWS regions." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Virtual Private Cloud (VPC) customers can now create and manage their own Prefix Lists to simplify VPC configurations." 
+ }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "description": "Updated and new APIs in support of hosts for connections to installed provider types. New integration with the GitHub Enterprise Server provider type." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.48.json b/.changes/2.13.48.json new file mode 100644 index 000000000000..56384c1db86a --- /dev/null +++ b/.changes/2.13.48.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.48", + "date": "2020-06-30", + "entries": [ + { + "type": "feature", + "category": "Amazon CodeGuru Reviewer", + "description": "Release GitHub Enterprise Server source provider integration" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "description": "Fix marshaller binding for input event streams when member name and shape name are not equal." + }, + { + "type": "feature", + "category": "AWS Comprehend Medical", + "description": "This release adds the relationships between MedicalCondition and Anatomy in DetectEntitiesV2 API." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Documentation updates for rds" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "Added support for tag-on-create for CreateVpc, CreateEgressOnlyInternetGateway, CreateSecurityGroup, CreateSubnet, CreateNetworkInterface, CreateNetworkAcl, CreateDhcpOptions and CreateInternetGateway. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "Add a new parameter (ImageDigest) and a new exception (ImageDigestDoesNotMatchException) to PutImage API to support pushing image by digest." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.49.json b/.changes/2.13.49.json new file mode 100644 index 000000000000..cc67573cc5bd --- /dev/null +++ b/.changes/2.13.49.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.49", + "date": "2020-07-01", + "entries": [ + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "This release adds the exceptions KMSKeyNotAccessibleFault and InvalidDBClusterStateFault to the Amazon RDS ModifyDBInstance API." + }, + { + "type": "feature", + "category": "AWS CodeBuild", + "description": "Support build status config in project source" + }, + { + "type": "feature", + "category": "Amazon Chime", + "description": "This release supports third party emergency call routing configuration for Amazon Chime Voice Connectors." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "description": "This release adds additional details for findings. There are now finding details for auto scaling groups, EC2 volumes, and EC2 VPCs. You can identify detected vulnerabilities and provide related network paths." + }, + { + "type": "feature", + "category": "EC2 Image Builder", + "description": "EC2 Image Builder adds support for encrypted AMI distribution." + }, + { + "type": "feature", + "category": "AWS AppSync", + "description": "AWS AppSync supports new 12xlarge instance for server-side API caching" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.5.json b/.changes/2.13.5.json new file mode 100644 index 000000000000..93ba9eed939a --- /dev/null +++ b/.changes/2.13.5.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.5", + "date": "2020-04-28", + "entries": [ + { + "type": "feature", + "category": "Amazon Kinesis Video Streams", + "description": "Add \"GET_CLIP\" to the list of supported API names for the GetDataEndpoint API." 
+ }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams Archived Media", + "description": "Add support for the GetClip API for retrieving media from a video stream in the MP4 format." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced", + "description": "Fix NPE on EnhancedType, created with documentOf, when calling innerToString" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "description": "Amazon Route 53 now supports the Africa (Cape Town) Region (af-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Registry", + "description": "This release adds support for multi-architecture images also known as a manifest list" + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "SSM State Manager support for adding list association filter for Resource Group and manual mode of managing compliance for an association." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "description": "AWS Elemental MediaLive now supports several new features: enhanced VQ for H.264 (AVC) output encodes; passthrough of timed metadata and of Nielsen ID3 metadata in fMP4 containers in HLS outputs; the ability to generate a SCTE-35 sparse track without additional segmentation, in Microsoft Smooth outputs; the ability to select the audio from a TS input by specifying the audio track; and conversion of HDR colorspace in the input to an SDR colorspace in the output." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.50.json b/.changes/2.13.50.json new file mode 100644 index 000000000000..8003dfb2ca88 --- /dev/null +++ b/.changes/2.13.50.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.50", + "date": "2020-07-02", + "entries": [ + { + "type": "feature", + "category": "Amazon ElastiCache", + "description": "Documentation updates for elasticache" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "description": "Documentation updates for Amazon Connect." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.51.json b/.changes/2.13.51.json new file mode 100644 index 000000000000..5b99c518dd98 --- /dev/null +++ b/.changes/2.13.51.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.51", + "date": "2020-07-06", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "This release supports optional start date and end date parameters for the GetAssetPropertyValueHistory API." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "description": "Add Theme APIs and update Dashboard APIs to support theme overrides." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "description": "Adds support for Amazon RDS on AWS Outposts." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.52.json b/.changes/2.13.52.json new file mode 100644 index 000000000000..d5a046c4561b --- /dev/null +++ b/.changes/2.13.52.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.52", + "date": "2020-07-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "DescribeAvailabilityZones now returns additional data about Availability Zones and Local Zones." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "This release adds support for automatic backups of Amazon EFS file systems to further simplify backup management." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "This release includes the preview release of the client-side metrics for the AWS SDK for Java v2. The SPI can be found in the `metrics-spi` module, and this release also includes a metric publisher for CloudWatch in `cloudwatch-metric-publisher`. See our post over at the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) for additional information." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding support for file-system driven directory refresh, Case Sensitivity toggle for SMB File Shares, and S3 Prefixes and custom File Share names" + }, + { + "type": "feature", + "category": "AWS Glue", + "description": "AWS Glue Data Catalog supports cross account sharing of tables through AWS Lake Formation" + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "description": "Amazon CloudFront adds support for a new security policy, TLSv1.2_2019." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ }, + { + "type": "feature", + "category": "AWS Lake Formation", + "description": "AWS Lake Formation supports sharing tables with other AWS accounts and organizations" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.53.json b/.changes/2.13.53.json new file mode 100644 index 000000000000..9ca5887e8cf1 --- /dev/null +++ b/.changes/2.13.53.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.53", + "date": "2020-07-08", + "entries": [ + { + "type": "feature", + "category": "Amazon Forecast Service", + "description": "With this release, Amazon Forecast now supports the ability to add a tag to any resource via the launch of three new APIs: TagResource, UntagResource and ListTagsForResource. A tag is a simple label consisting of a customer-defined key and an optional value allowing for easier resource management." + }, + { + "type": "feature", + "category": "AWS Organizations", + "description": "We have launched a self-service option to make it easier for customers to manage the use of their content by AI services. Certain AI services (Amazon CodeGuru Profiler, Amazon Comprehend, Amazon Lex, Amazon Polly, Amazon Rekognition, Amazon Textract, Amazon Transcribe, and Amazon Translate), may use content to improve the service. Customers have been able to opt out of this use by contacting AWS Support, and now they can opt out on a self-service basis by setting an Organizations policy for all or an individual AI service as listed above. Please refer to the technical documentation for more details." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "EC2 Spot now enables customers to tag their Spot Instances Requests on creation." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "description": "Customers can now see Instance Name alongside each rightsizing recommendation." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.13.54.json b/.changes/2.13.54.json new file mode 100644 index 000000000000..55d8ea3895fb --- /dev/null +++ b/.changes/2.13.54.json @@ -0,0 +1,76 @@ +{ + "version": "2.13.54", + "date": "2020-07-09", + "entries": [ + { + "type": "feature", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Support converting \"0\" and \"1\" numbers read from DynamoDB to Boolean and AtomicBoolean." + }, + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "description": "This release introduces the following set of actions for the EBS direct APIs: 1. StartSnapshot, which creates a new Amazon EBS snapshot. 2. PutSnapshotBlock, which writes a block of data to a snapshot. 3. CompleteSnapshot, which seals and completes a snapshot after blocks of data have been written to it." + }, + { + "type": "feature", + "category": "Amazon Simple Notification Service", + "description": "This release adds support for SMS origination number as an attribute in the MessageAttributes parameter for the SNS Publish API." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Events", + "description": "Amazon CloudWatch Events/EventBridge adds support for API Gateway as a target." + }, + { + "type": "feature", + "category": "Alexa For Business", + "description": "Added support for registering an AVS device directly to a room using RegisterAVSDevice with a room ARN" + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "description": "Adds support for filters on the ListSecrets API to allow filtering results by name, tag key, tag value, or description. Adds support for the BlockPublicPolicy option on the PutResourcePolicy API to block resource policies which grant a wide range of IAM principals access to secrets. Adds support for the ValidateResourcePolicy API to validate resource policies for syntax and prevent lockout error scenarios and wide access to secrets." 
+ }, + { + "type": "feature", + "category": "Amazon EventBridge", + "description": "Amazon EventBridge adds support for API Gateway as a target." + }, + { + "type": "feature", + "category": "AWS Amplify", + "description": "Documentation update to the introduction text to specify that this is the Amplify Console API." + }, + { + "type": "feature", + "category": "AWS CloudHSM V2", + "description": "Documentation updates for cloudhsmv2" + }, + { + "type": "feature", + "category": "Amazon Comprehend", + "description": "AWS Comprehend now supports Real-time Analysis with Custom Entity Recognition." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "description": "This release adds the DeleteHumanTaskUi API to Amazon Augmented AI" + }, + { + "type": "feature", + "category": "AWS App Mesh", + "description": "AppMesh now supports Ingress which allows resources outside a mesh to communicate to resources that are inside the mesh. See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_gateways.html" + }, + { + "type": "feature", + "category": "AWS WAFV2", + "description": "Added the option to use IP addresses from an HTTP header that you specify, instead of using the web request origin. Available for IP set matching, geo matching, and rate-based rule count aggregation." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.55.json b/.changes/2.13.55.json new file mode 100644 index 000000000000..1d0467399bf3 --- /dev/null +++ b/.changes/2.13.55.json @@ -0,0 +1,16 @@ +{ + "version": "2.13.55", + "date": "2020-07-15", + "entries": [ + { + "type": "feature", + "category": "Amazon Interactive Video Service", + "description": "Introducing Amazon Interactive Video Service - a managed live streaming solution that is quick and easy to set up, and ideal for creating interactive video experiences." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.6.json b/.changes/2.13.6.json new file mode 100644 index 000000000000..d2e47da6c865 --- /dev/null +++ b/.changes/2.13.6.json @@ -0,0 +1,36 @@ +{ + "version": "2.13.6", + "date": "2020-04-29", + "entries": [ + { + "type": "feature", + "category": "AWS IoT SiteWise", + "description": "AWS IoT SiteWise is a managed service that makes it easy to collect, store, organize and monitor data from industrial equipment at scale. You can use AWS IoT SiteWise to model your physical assets, processes and facilities, quickly compute common industrial performance metrics, and create fully managed web applications to help analyze industrial equipment data, prevent costly equipment issues, and reduce production inefficiencies." + }, + { + "type": "feature", + "category": "AWS WAF Regional", + "description": "This release add migration API for AWS WAF Classic (\"waf\" and \"waf-regional\"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF (\"wafv2\")." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS WAF", + "description": "This release add migration API for AWS WAF Classic (\"waf\" and \"waf-regional\"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF (\"wafv2\")." 
+ }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "description": "With this release, you can now use Amazon Transcribe to create medical custom vocabularies and use them in both medical real-time streaming and medical batch transcription jobs." + }, + { + "type": "feature", + "category": "AWS Cloud Map", + "description": "Documentation updates for servicediscovery" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.7.json b/.changes/2.13.7.json new file mode 100644 index 000000000000..2f0e8e0869dd --- /dev/null +++ b/.changes/2.13.7.json @@ -0,0 +1,46 @@ +{ + "version": "2.13.7", + "date": "2020-04-30", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "description": "AWS Elemental MediaConvert SDK has added support for including AFD signaling in MXF wrapper." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "description": "Adding support for S3_INTELLIGENT_TIERING as a storage class option" + }, + { + "type": "feature", + "category": "Schemas", + "description": "Add support for resource policies for Amazon EventBridge Schema Registry, which is now generally available." + }, + { + "type": "feature", + "category": "AWS IoT Events", + "description": "Doc only update to correct APIs and related descriptions" + }, + { + "type": "feature", + "category": "AWS IoT", + "description": "AWS IoT Core released Fleet Provisioning for scalable onboarding of IoT devices to the cloud. This release includes support for customer's Lambda functions to validate devices during onboarding. Fleet Provisioning also allows devices to send Certificate Signing Requests (CSR) to AWS IoT Core for signing and getting a unique certificate. 
Lastly, AWS IoT Core added a feature to register the same certificate for multiple accounts in the same region without needing to register the certificate authority (CA)." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Made OperationContext a public interface and moved it into public namespace as it was already exposed through another public interface. This will only impact extensions that have been written to reference the old internal-only class that should now switch to the approved stable public interface." + }, + { + "type": "feature", + "category": "AWS Lambda", + "description": "Documentation updates for Lambda" + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.8.json b/.changes/2.13.8.json new file mode 100644 index 000000000000..62b0bbab76b4 --- /dev/null +++ b/.changes/2.13.8.json @@ -0,0 +1,21 @@ +{ + "version": "2.13.8", + "date": "2020-05-01", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "description": "Change the TagKeys argument for UntagResource to a URL parameter to address an issue with the Java and .NET SDKs." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "description": "Added TimeoutSeconds as part of ListCommands API response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.13.9.json b/.changes/2.13.9.json new file mode 100644 index 000000000000..8f1ef3329be9 --- /dev/null +++ b/.changes/2.13.9.json @@ -0,0 +1,31 @@ +{ + "version": "2.13.9", + "date": "2020-05-04", + "entries": [ + { + "type": "feature", + "category": "AWS SDK for Java v2", + "description": "Updated service endpoint metadata." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "description": "Amazon S3 Batch Operations now supports Object Lock." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "description": "With this release, you can include enriched metadata in Amazon Virtual Private Cloud (Amazon VPC) flow logs published to Amazon CloudWatch Logs or Amazon Simple Storage Service (S3). Prior to this, custom format VPC flow logs enriched with additional metadata could be published only to S3. With this launch, we are also adding additional metadata fields that provide insights about the location such as AWS Region, AWS Availability Zone, AWS Local Zone, AWS Wavelength Zone, or AWS Outpost where the network interface where flow logs are captured exists." + }, + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "description": "Bugfix for handling special characters ':' and '#' in attribute names" + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "description": "Documentation updates for Amazon API Gateway" + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSSDKforJavav2-9a322a2.json b/.changes/next-release/bugfix-AWSSDKforJavav2-9a322a2.json deleted file mode 100644 index ef986f986bfd..000000000000 --- a/.changes/next-release/bugfix-AWSSDKforJavav2-9a322a2.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "category": "AWS Common Runtime Client", - "type": "bugfix", - "description": "Upgrade to the latest version (0.3.35) of the AWS Common Runtime." 
-} diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 000000000000..fac624d37159 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F4D5 Documentation Issue" +about: Report an issue in the API Reference documentation or Developer Guide +labels: documentation, needs-triage +--- + + + +## Describe the issue + + +## Links + + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 43530d2152da..9492a02c8a94 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -30,7 +30,7 @@ labels: feature-request, needs-triage ## Your Environment - + * AWS Java SDK version used: * JDK version used: * Operating System and version: diff --git a/.github/ISSUE_TEMPLATE/general-issue.md b/.github/ISSUE_TEMPLATE/general-issue.md index aad931388a4c..52273edcc004 100644 --- a/.github/ISSUE_TEMPLATE/general-issue.md +++ b/.github/ISSUE_TEMPLATE/general-issue.md @@ -22,7 +22,7 @@ labels: guidance, needs-triage ## Your Environment - + * AWS Java SDK version used: * JDK version used: * Operating System and version: diff --git a/.github/workflows/stale-issue.yml b/.github/workflows/stale-issue.yml new file mode 100644 index 000000000000..4e487ef5c7a0 --- /dev/null +++ b/.github/workflows/stale-issue.yml @@ -0,0 +1,56 @@ +name: "Close stale issues" + +# Controls when the action will run. +on: + schedule: + - cron: "0/30 * * * *" + +jobs: + cleanup: + name: Stale issue job + runs-on: ubuntu-latest + steps: + - uses: aws-actions/stale-issue-cleanup@v3 + with: + # Setting messages to an empty string will cause the automation to skip + # that category + ancient-issue-message: This is a very old issue that is probably not getting as much + attention as it deserves. 
We encourage you to check if this is still an issue in + the latest release and if you find that this is still a problem, please feel free + to provide a comment or open a new issue. + stale-issue-message: It looks like this issue hasn’t been active in longer than a week. + In the absence of more information, we will be closing this issue soon. If you find + that this is still a problem, please add a comment to prevent automatic closure, or + if the issue is already closed please feel free to reopen it. + stale-pr-message: It looks like this PR hasn’t been active in longer than a week. In + the absence of more information, we will be closing this PR soon. Please add a + comment to prevent automatic closure, or if the PR is already closed please feel + free to open a new one. + + # These labels are required + stale-issue-label: closing-soon + exempt-issue-label: no-auto-closure + stale-pr-label: closing-soon + exempt-pr-label: no-auto-closure + response-requested-label: response-requested + + # Don't set closed-for-staleness label to skip closing very old issues + # regardless of label + closed-for-staleness-label: closed-for-staleness + + # Issue timing + days-before-stale: 7 + days-before-close: 4 + days-before-ancient: 1095 + + # If you don't want to mark a issue as being ancient based on a + # threshold of "upvotes", you can set this here. An "upvote" is + # the total number of +1, heart, hooray, and rocket reactions + # on an issue. + minimum-upvotes-to-exempt: 1 + + repo-token: ${{ secrets.GITHUB_TOKEN }} + loglevel: DEBUG + # Set dry-run to true to not perform label or close actions. + #dry-run: true + diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f30c0afcaf1..5dce96a1825a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,1516 @@ +# __2.13.55__ __2020-07-15__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon Interactive Video Service__ + - ### Features + - Introducing Amazon Interactive Video Service - a managed live streaming solution that is quick and easy to set up, and ideal for creating interactive video experiences. + +# __2.13.54__ __2020-07-09__ +## __AWS Amplify__ + - ### Features + - Documentation update to the introduction text to specify that this is the Amplify Console API. + +## __AWS App Mesh__ + - ### Features + - AppMesh now supports Ingress which allows resources outside a mesh to communicate to resources that are inside the mesh. See https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_gateways.html + +## __AWS CloudHSM V2__ + - ### Features + - Documentation updates for cloudhsmv2 + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Secrets Manager__ + - ### Features + - Adds support for filters on the ListSecrets API to allow filtering results by name, tag key, tag value, or description. Adds support for the BlockPublicPolicy option on the PutResourcePolicy API to block resource policies which grant a wide range of IAM principals access to secrets. Adds support for the ValidateResourcePolicy API to validate resource policies for syntax and prevent lockout error scenarios and wide access to secrets. + +## __AWS WAFV2__ + - ### Features + - Added the option to use IP addresses from an HTTP header that you specify, instead of using the web request origin. Available for IP set matching, geo matching, and rate-based rule count aggregation. + +## __Alexa For Business__ + - ### Features + - Added support for registering an AVS device directly to a room using RegisterAVSDevice with a room ARN + +## __Amazon CloudWatch Events__ + - ### Features + - Amazon CloudWatch Events/EventBridge adds support for API Gateway as a target. + +## __Amazon Comprehend__ + - ### Features + - AWS Comprehend now supports Real-time Analysis with Custom Entity Recognition. 
+ +## __Amazon DynamoDB Enhanced Client__ + - ### Features + - Support converting "0" and "1" numbers read from DynamoDB to Boolean and AtomicBoolean. + +## __Amazon Elastic Block Store__ + - ### Features + - This release introduces the following set of actions for the EBS direct APIs: 1. StartSnapshot, which creates a new Amazon EBS snapshot. 2. PutSnapshotBlock, which writes a block of data to a snapshot. 3. CompleteSnapshot, which seals and completes a snapshot after blocks of data have been written to it. + +## __Amazon EventBridge__ + - ### Features + - Amazon EventBridge adds support for API Gateway as a target. + +## __Amazon SageMaker Service__ + - ### Features + - This release adds the DeleteHumanTaskUi API to Amazon Augmented AI + +## __Amazon Simple Notification Service__ + - ### Features + - This release adds support for SMS origination number as an attribute in the MessageAttributes parameter for the SNS Publish API. + +# __2.13.53__ __2020-07-08__ +## __AWS Cost Explorer Service__ + - ### Features + - Customers can now see Instance Name alongside each rightsizing recommendation. + +## __AWS Organizations__ + - ### Features + - We have launched a self-service option to make it easier for customers to manage the use of their content by AI services. Certain AI services (Amazon CodeGuru Profiler, Amazon Comprehend, Amazon Lex, Amazon Polly, Amazon Rekognition, Amazon Textract, Amazon Transcribe, and Amazon Translate), may use content to improve the service. Customers have been able to opt out of this use by contacting AWS Support, and now they can opt out on a self-service basis by setting an Organizations policy for all or an individual AI service as listed above. Please refer to the technical documentation for more details. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 Spot now enables customers to tag their Spot Instances Requests on creation. 
+ +## __Amazon Forecast Service__ + - ### Features + - With this release, Amazon Forecast now supports the ability to add a tag to any resource via the launch of three new APIs: TagResource, UntagResource and ListTagsForResource. A tag is a simple label consisting of a customer-defined key and an optional value allowing for easier resource management. + +# __2.13.52__ __2020-07-07__ +## __AWS Glue__ + - ### Features + - AWS Glue Data Catalog supports cross account sharing of tables through AWS Lake Formation + +## __AWS Lake Formation__ + - ### Features + - AWS Lake Formation supports sharing tables with other AWS accounts and organizations + +## __AWS SDK for Java v2__ + - ### Features + - This release includes the preview release of the client-side metrics for the AWS SDK for Java v2. The SPI can be found in the `metrics-spi` module, and this release also includes a metric publisher for CloudWatch in `cloudwatch-metric-publisher`. See our post over at the [AWS Developer Blog](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-sdk-for-java/) for additional information. + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding support for file-system driven directory refresh, Case Sensitivity toggle for SMB File Shares, and S3 Prefixes and custom File Share names + +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for a new security policy, TLSv1.2_2019. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - DescribeAvailabilityZones now returns additional data about Availability Zones and Local Zones. + +## __Amazon Elastic File System__ + - ### Features + - This release adds support for automatic backups of Amazon EFS file systems to further simplify backup management. + +# __2.13.51__ __2020-07-06__ +## __AWS IoT SiteWise__ + - ### Features + - This release supports optional start date and end date parameters for the GetAssetPropertyValueHistory API.
+ +## __Amazon QuickSight__ + - ### Features + - Add Theme APIs and update Dashboard APIs to support theme overrides. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for Amazon RDS on AWS Outposts. + +# __2.13.50__ __2020-07-02__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Connect Service__ + - ### Features + - Documentation updates for Amazon Connect. + +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +# __2.13.49__ __2020-07-01__ +## __AWS AppSync__ + - ### Features + - AWS AppSync supports new 12xlarge instance for server-side API caching + +## __AWS CodeBuild__ + - ### Features + - Support build status config in project source + +## __AWS SecurityHub__ + - ### Features + - This release adds additional details for findings. There are now finding details for auto scaling groups, EC2 volumes, and EC2 VPCs. You can identify detected vulnerabilities and provide related network paths. + +## __Amazon Chime__ + - ### Features + - This release supports third party emergency call routing configuration for Amazon Chime Voice Connectors. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds the exceptions KMSKeyNotAccessibleFault and InvalidDBClusterStateFault to the Amazon RDS ModifyDBInstance API. + +## __EC2 Image Builder__ + - ### Features + - EC2 Image Builder adds support for encrypted AMI distribution. + +# __2.13.48__ __2020-06-30__ +## __AWS Comprehend Medical__ + - ### Features + - This release adds the relationships between MedicalCondition and Anatomy in DetectEntitiesV2 API. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix marshaller binding for input event streams when member name and shape name are not equal. 
+ +## __Amazon CodeGuru Reviewer__ + - ### Features + - Release GitHub Enterprise Server source provider integration + +## __Amazon EC2 Container Registry__ + - ### Features + - Add a new parameter (ImageDigest) and a new exception (ImageDigestDoesNotMatchException) to PutImage API to support pushing image by digest. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for tag-on-create for CreateVpc, CreateEgressOnlyInternetGateway, CreateSecurityGroup, CreateSubnet, CreateNetworkInterface, CreateNetworkAcl, CreateDhcpOptions and CreateInternetGateway. You can now specify tags when creating any of these resources. For more information about tagging, see AWS Tagging Strategies. + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for rds + +# __2.13.47__ __2020-06-29__ +## __AWS CodeStar connections__ + - ### Features + - Updated and new APIs in support of hosts for connections to installed provider types. New integration with the GitHub Enterprise Server provider type. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CodeGuru Profiler__ + - ### Features + - Amazon CodeGuru Profiler is now generally available. The Profiler helps developers to optimize their software, troubleshoot issues in production, and identify their most expensive lines of code. As part of general availability, we are launching: Profiling of AWS Lambda functions, Anomaly detection in CPU profiles, Color My Code on flame graphs, Expanding presence to 10 AWS regions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Virtual Private Cloud (VPC) customers can now create and manage their own Prefix Lists to simplify VPC configurations. + +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling. 
+ +# __2.13.46__ __2020-06-26__ +## __AWS CloudFormation__ + - ### Features + - ListStackInstances and DescribeStackInstance now return a new `StackInstanceStatus` object that contains `DetailedStatus` values: a disambiguation of the more generic `Status` value. ListStackInstances output can now be filtered on `DetailedStatus` using the new `Filters` parameter. + +## __AWS Database Migration Service__ + - ### Features + - This release contains miscellaneous API documentation updates for AWS DMS in response to several customer reported issues. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Don't require Authorization for InitiateAuth and RespondToAuthChallenge. + +## __Amazon QuickSight__ + - ### Features + - Added support for cross-region DataSource credentials copying. + +## __Amazon SageMaker Service__ + - ### Features + - The new 'ModelClientConfig' parameter being added for CreateTransformJob and DescribeTransformJob api actions enable customers to configure model invocation related parameters such as timeout and retry. + +# __2.13.45__ __2020-06-25__ +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Fixed a bug causing a NullPointerException to be thrown in the enhanced DeleteItem operation if a conditionExpression was given with null attributeNames or null attributeValues. + +## __AWS Glue__ + - ### Features + - This release adds new APIs to support column level statistics in AWS Glue Data Catalog + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Added support for tag-on-create for Host Reservations in Dedicated Hosts. You can now specify tags when you create a Host Reservation for a Dedicated Host. For more information about tagging, see AWS Tagging Strategies. 
+ +# __2.13.44__ __2020-06-24__ +## __AWS Amplify__ + - ### Features + - This release of AWS Amplify Console introduces support for automatically creating custom subdomains for branches based on user-defined glob patterns, as well as automatically cleaning up Amplify branches when their corresponding git branches are deleted. + +## __AWS Backup__ + - ### Features + - Customers can now manage and monitor their backups in a policy-based manner across their AWS accounts, via an integration between AWS Backup and AWS Organizations + +## __AWS CodeCommit__ + - ### Features + - This release introduces support for reactions to CodeCommit comments. Users will be able to select from a pre-defined list of emojis to express their reaction to any comments. + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for iam + +## __AWS Organizations__ + - ### Features + - This release adds support for a new backup policy type for AWS Organizations. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR customers can now set allocation strategies for On-Demand and Spot instances in their EMR clusters with instance fleets. These allocation strategies use real-time capacity insights to provision clusters faster and make the most efficient use of available spare capacity to allocate Spot instances to reduce interruptions. + +## __Amazon FSx__ + - ### Features + - This release adds the capability to take highly-durable, incremental backups of your FSx for Lustre persistent file systems. This capability makes it easy to further protect your file system data and to meet business and regulatory compliance requirements. + +## __Amazon Honeycode__ + - ### Features + - Introducing Amazon Honeycode - a fully managed service that allows you to quickly build mobile and web apps for teams without programming.
+ +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling. + +# __2.13.43__ __2020-06-23__ +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of Bumper. + +## __AWS Organizations__ + - ### Features + - Added a new error message to support the requirement for a Business License on AWS accounts in China to create an organization. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.13.42__ __2020-06-22__ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds Tag On Create feature support for the ImportImage, ImportSnapshot, ExportImage and CreateInstanceExportTask APIs. + +## __Amazon Elastic MapReduce__ + - ### Features + - Adding support for MaximumCoreCapacityUnits parameter for EMR Managed Scaling. It allows users to control how many units/nodes are added to the CORE group/fleet. Remaining units/nodes are added to the TASK groups/fleet in the cluster. + +## __Amazon Rekognition__ + - ### Features + - This update adds the ability to detect black frames, end credits, shots, and color bars in stored videos + +## __Amazon Relational Database Service__ + - ### Features + - Added paginators for various APIs. + +## __Amazon Simple Queue Service__ + - ### Features + - AWS SQS adds pagination support for ListQueues and ListDeadLetterSourceQueues APIs + +# __2.13.41__ __2020-06-19__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports Input Prepare schedule actions. This feature improves existing input switching by allowing users to prepare an input prior to switching to it. + +## __AWS OpsWorks CM__ + - ### Features + - Documentation updates for AWS OpsWorks CM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon ElastiCache__ + - ### Features + - Documentation updates for elasticache + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support to tag elastic-gpu on the RunInstances api + +# __2.13.40__ __2020-06-18__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for NexGuard FileMarker SDK, which allows NexGuard partners to watermark proprietary content in mezzanine and OTT streaming contexts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Support__ + - ### Features + - Documentation updates for support + +## __AWSMarketplace Metering__ + - ### Features + - Documentation updates for meteringmarketplace + +## __Amazon Relational Database Service__ + - ### Features + - Adding support for global write forwarding on secondary clusters in an Aurora global database. + +## __Amazon Route 53__ + - ### Features + - Added a new ListHostedZonesByVPC API for customers to list all the private hosted zones that a specified VPC is associated with. + +## __Amazon Simple Email Service__ + - ### Features + - You can now configure Amazon SES to send event notifications when the delivery of an email is delayed because of a temporary issue. For example, you can receive a notification if the recipient's inbox is full, or if there's a temporary problem with the receiving email server. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression before running the maintenance window. + +# __2.13.39__ __2020-06-17__ +## __AWS App Mesh__ + - ### Features + - Adds support for route and virtual node listener timeouts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - nvmeSupport added to DescribeInstanceTypes API + +## __Amazon Import/Export Snowball__ + - ### Features + - AWS Snowcone is a portable, rugged and secure device for edge computing and data transfer. You can use Snowcone to collect, process, and move data to AWS, either offline by shipping the device to AWS or online by using AWS DataSync. With 2 CPUs and 4 GB RAM of compute and 8 TB of storage, Snowcone can run edge computing workloads and store data securely. Snowcone's small size (8.94" x 5.85" x 3.25" / 227 mm x 148.6 mm x 82.65 mm) allows you to set it next to machinery in a factory. Snowcone weighs about 4.5 lbs. (2 kg), so you can carry one in a backpack, use it with battery-based operation, and use the Wi-Fi interface to gather sensor data. Snowcone supports a file interface with NFS support. + +## __Amazon Macie 2__ + - ### Features + - This is a documentation-only update to the Amazon Macie API. This update contains miscellaneous editorial improvements to various API descriptions. + +## __Amazon Route 53__ + - ### Features + - Add PriorRequestNotComplete exception to AssociateVPCWithHostedZone API + +# __2.13.38__ __2020-06-16__ +## __AWS Data Exchange__ + - ### Features + - This release fixes a bug in the AWS Data Exchange Python and NodeJS SDKs. The 'KmsKeyArn' field in the create-job API was configured to be required instead of optional. We updated this field to be optional in this release. + +## __AWS Lambda__ + - ### Features + - Adds support for using Amazon Elastic File System (persistent storage) with AWS Lambda. This enables customers to share data across function invocations, read large reference data files, and write function output to a persistent and shared store. 
+ +## __Amazon CloudFront__ + - ### Features + - Documentation updates for CloudFront + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix an NPE in `OptionalAttributeConverter` that can happen if the `nul()` property of the `AttributeValue` is `null`. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new US English child voice - Kevin. Kevin is available as Neural voice only. + +## __Amazon QLDB__ + - ### Features + - Documentation updates for Amazon QLDB + +## __Auto Scaling__ + - ### Features + - Introducing instance refresh, a feature that helps you update all instances in an Auto Scaling group in a rolling fashion (for example, to apply a new AMI or instance type). You can control the pace of the refresh by defining the percentage of the group that must remain running/healthy during the replacement process and the time for new instances to warm up between replacements. + +# __2.13.37__ __2020-06-15__ +## __AWS IoT__ + - ### Features + - Added support for job executions rollout configuration, job abort configuration, and job executions timeout configuration for AWS IoT Over-the-Air (OTA) Update Feature. + +## __Alexa For Business__ + - ### Features + - Adding support for optional tags in CreateBusinessReportSchedule, CreateProfile and CreateSkillGroup APIs + +## __Amazon AppConfig__ + - ### Features + - This release adds a hosted configuration source provider. Customers can now store their application configurations directly in AppConfig, without the need for an external configuration source. + +## __Amazon Chime__ + - ### Features + - feature: Chime: This release introduces the ability to create an AWS Chime SDK meeting with attendees. + +## __Amazon Cognito Identity Provider__ + - ### Features + - Updated all AuthParameters to be sensitive.
+ +# __2.13.36__ __2020-06-12__ +## __AWS CloudFormation__ + - ### Features + - The following parameters now return the organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets: the OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary + +## __AWS Glue__ + - ### Features + - You can now choose to crawl the entire table or just a sample of records in DynamoDB when using AWS Glue crawlers. Additionally, you can also specify a scanning rate for crawling DynamoDB tables. + +## __AWS SDK for Java v2 DynamoDB Enhanced Client__ + - ### Bugfixes + - Added ClientRequestToken in class TransactWriteItemsEnhancedRequest. + +## __AWS Storage Gateway__ + - ### Features + - Display EndpointType in DescribeGatewayInformation + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +# __2.13.35__ __2020-06-11__ +## __AWS IoT Data Plane__ + - ### Features + - As part of this release, we are introducing a new feature called named shadow, which extends the capability of AWS IoT Device Shadow to support multiple shadows for a single IoT device. With this release, customers can store different device state data into different shadows, and as a result access only the required state data when needed and reduce individual shadow size. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Avoid unnecessary copying in `AsyncRequestBody.fromBytes()` + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for deleting capacity providers. + +## __Amazon Lex Model Building Service__ + - ### Features + - This change adds the built-in AMAZON.KendraSearchIntent that enables integration with Amazon Kendra. 
+ +## __EC2 Image Builder__ + - ### Features + - EC2 Image Builder now supports specifying a custom working directory for your build and test workflows. In addition, Image Builder now supports defining tags that are applied to ephemeral resources created by EC2 Image Builder as part of the image creation workflow. + +# __2.13.34__ __2020-06-10__ +## __AWS Compute Optimizer__ + - ### Features + - Compute Optimizer supports exporting recommendations to Amazon S3. + +## __AWS Service Catalog__ + - ### Features + - Service Catalog Documentation Update for Integration with AWS Organizations Delegated Administrator feature + +## __AWS Shield__ + - ### Features + - Corrections to the supported format for contact phone numbers and to the description for the create subscription action. + +## __Amazon AppConfig__ + - ### Features + - This release allows customers to choose from a list of predefined deployment strategies while starting deployments. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Reducing the schedule name of DLM Lifecycle policy from 500 to 120 characters. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New C6g instances powered by AWS Graviton2 processors and ideal for running advanced, compute-intensive workloads; New R6g instances powered by AWS Graviton2 processors and ideal for running memory-intensive workloads. + +## __Amazon Lightsail__ + - ### Features + - Documentation updates for lightsail + +## __Amazon Macie 2__ + - ### Features + - This release of the Amazon Macie API removes support for the ArchiveFindings and UnarchiveFindings operations. This release also adds UNKNOWN as an encryption type for S3 bucket metadata. + +## __CodeArtifact__ + - ### Features + - Added support for AWS CodeArtifact. + +# __2.13.33__ __2020-06-09__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __AWS Transfer Family__ + - ### Features + - This release updates the API so customers can test use of Source IP to allow, deny or limit access to data in their S3 buckets after integrating their identity provider. + +# __2.13.32__ __2020-06-08__ +## __AWS Cloud Map__ + - ### Features + - Added support for tagging Service and Namespace type resources in Cloud Map + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Update javadoc annotation for AwsBasicCredentials + +## __AWS Shield__ + - ### Features + - This release adds the option for customers to identify a contact name and method that the DDoS Response Team can proactively engage when a Route 53 Health Check that is associated with a Shield protected resource fails. + +# __2.13.31__ __2020-06-05__ +## __AWS Elastic Beanstalk__ + - ### Features + - These API changes enable an IAM user to associate an operations role with an Elastic Beanstalk environment, so that the IAM user can call Elastic Beanstalk actions without having access to underlying downstream AWS services that these actions call. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Service Catalog__ + - ### Features + - This release adds support for DescribeProduct and DescribeProductAsAdmin by product name, DescribeProvisioningArtifact by product name or provisioning artifact name, returning launch paths as part of DescribeProduct output and adds maximum length for provisioning artifact name and provisioning artifact description. + +## __Amazon API Gateway__ + - ### Features + - Amazon API Gateway now allows customers of REST APIs to skip trust chain validation for backend server certificates for HTTP and VPC Link Integration. This feature enables customers to configure their REST APIs to integrate with backends that are secured with certificates vended from private certificate authorities (CA) or certificates that are self-signed. 
+ +## __Amazon CloudFront__ + - ### Features + - Amazon CloudFront adds support for configurable origin connection attempts and origin connection timeout. + +## __Amazon Personalize__ + - ### Features + - [Personalize] Adds ability to create and apply filters. + +## __Amazon Personalize Runtime__ + - ### Features + - [Personalize] Adds ability to apply filter to real-time recommendations + +## __Amazon Pinpoint__ + - ### Features + - This release enables additional functionality for the Amazon Pinpoint journeys feature. With this release, you can send messages through additional channels, including SMS, push notifications, and custom channels. + +## __Amazon SageMaker Runtime__ + - ### Features + - You can now specify the production variant to send the inference request to, when invoking a SageMaker Endpoint that is running two or more variants. + +# __2.13.30__ __2020-06-04__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - You can now restrict direct access to AWS Elemental MediaPackage by securing requests for VOD content using CDN authorization. With CDN authorization, content requests require a specific HTTP header and authorization code. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSMarketplace Metering__ + - ### Features + - Documentation updates for meteringmarketplace + +## __Amazon Elastic Compute Cloud__ + - ### Features + - New C5a instances, the latest generation of EC2's compute-optimized instances featuring AMD's 2nd Generation EPYC processors. C5a instances offer up to 96 vCPUs, 192 GiB of instance memory, 20 Gbps in Network bandwidth; New G4dn.metal bare metal instance with 8 NVIDIA T4 GPUs. + +## __Amazon Lightsail__ + - ### Features + - This release adds the BurstCapacityPercentage and BurstCapacityTime instance metrics, which allow you to track the burst capacity available to your instance. 
+ +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - SSM State Manager support for executing an association only at specified CRON schedule after creating/updating an association. + +# __2.13.29__ __2020-06-03__ +## __AWS Direct Connect__ + - ### Features + - This release supports the virtual interface failover test, which allows you to verify that traffic routes over redundant virtual interfaces when you bring your primary virtual interface out of service. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for the encoding of VP8 or VP9 video in WebM container with Vorbis or Opus audio. + +## __AWS Glue__ + - ### Features + - Adding databaseName in the response for GetUserDefinedFunctions() API. + +## __AWS Identity and Access Management__ + - ### Features + - GenerateServiceLastAccessedDetails will now return ActionLastAccessed details for certain S3 control plane actions + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon ElastiCache__ + - ### Features + - This release improves the Multi-AZ feature in ElastiCache by adding a separate flag and proper validations. + +## __Amazon Elasticsearch Service__ + - ### Features + - Amazon Elasticsearch Service now offers support for cross-cluster search, enabling you to perform searches, aggregations, and visualizations across multiple Amazon Elasticsearch Service domains with a single query or from a single Kibana interface. New feature includes the ability to setup connection, required to perform cross-cluster search, between domains using an approval workflow. 
+ +# __2.13.28__ __2020-06-02__ +## __Amazon GuardDuty__ + - ### Features + - Amazon GuardDuty findings now include S3 bucket details under the resource section if an S3 Bucket was one of the affected resources + +# __2.13.27__ __2020-06-01__ +## __AWS Key Management Service__ + - ### Features + - AWS Key Management Service (AWS KMS): If the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext APIs are called on a CMK in a custom key store (origin == AWS_CLOUDHSM), they return an UnsupportedOperationException. If a call to UpdateAlias causes a customer to exceed the Alias resource quota, the UpdateAlias API returns a LimitExceededException. + +## __AWS Maven Lambda Archetype__ + - ### Features + - Updated the `archetype-lambda` to generate SDK client that uses region from environment variable. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Athena__ + - ### Features + - This release adds support for connecting Athena to your own Apache Hive Metastores in addition to the AWS Glue Data Catalog. For more information, please see https://docs.aws.amazon.com/athena/latest/ug/connect-to-data-source-hive.html + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR now supports encrypting log files with AWS Key Management Service (KMS) customer managed keys. + +## __Amazon FSx__ + - ### Features + - New capabilities to update storage capacity and throughput capacity of your file systems, providing the flexibility to grow file storage and to scale up or down the available performance as needed to meet evolving storage needs over time. + +## __Amazon SageMaker Service__ + - ### Features + - We are releasing HumanTaskUiArn as a new parameter in CreateLabelingJob and RenderUiTemplate which can take an ARN for a system managed UI to render a task. + +## __Amazon WorkLink__ + - ### Features + - Amazon WorkLink now supports resource tagging for fleets. 
+ +# __2.13.26__ __2020-05-28__ +## __AWS Marketplace Catalog Service__ + - ### Features + - AWS Marketplace Catalog now supports accessing initial change payloads with DescribeChangeSet operation. + +## __Amazon QLDB Session__ + - ### Features + - Documentation updates for Amazon QLDB Session + +## __Amazon WorkMail__ + - ### Features + - This release adds support for Amazon WorkMail organization-level retention policies. + +## __Managed Streaming for Kafka__ + - ### Features + - New APIs for upgrading the Apache Kafka version of a cluster and to find out compatible upgrade paths + +# __2.13.25__ __2020-05-27__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon GuardDuty__ + - ### Features + - Documentation updates for GuardDuty + +## __Amazon S3__ + - ### Bugfixes + - Check the `x-amz-content-range` header for `GetObject` responses when the `Content-Range` header is not returned by the service. Fixes [#1209](https://github.com/aws/aws-sdk-java-v2/issues/1209). + +## __Elastic Load Balancing__ + - ### Features + - This release added support for HTTP/2 ALPN preference lists for Network Load Balancers + +# __2.13.24__ __2020-05-26__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Allowing cron expression in the DLM policy creation schedule. + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now allows you to use resource based policies to manage access to operations performed on ElastiCache resources. Also, Amazon ElastiCache now exposes ARN (Amazon Resource Names) for ElastiCache resources such as Cache Clusters and Parameter Groups. ARNs can be used to apply IAM policies to ElastiCache resources. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - ebsOptimizedInfo, efaSupported and supportedVirtualizationTypes added to DescribeInstanceTypes API + +## __Amazon Macie__ + - ### Features + - This is a documentation-only update to the Amazon Macie Classic API. This update corrects out-of-date references to the service name. + +## __Amazon QuickSight__ + - ### Features + - Add DataSetArns to QuickSight DescribeDashboard API response. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - The AWS Systems Manager GetOpsSummary API action now supports multiple OpsResultAttributes in the request. Currently, this feature only supports OpsResultAttributes with the following TypeNames: [AWS:EC2InstanceComputeOptimizer] or [AWS:EC2InstanceInformation, AWS:EC2InstanceComputeOptimizer]. These TypeNames can be used along with either or both of the following: [AWS:EC2InstanceRecommendation, AWS:RecommendationSource] + +# __2.13.23__ __2020-05-22__ +## __AWS IoT SiteWise__ + - ### Features + - This release adds support for the standard deviation auto-computed aggregate and improved support for portal logo images in SiteWise. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Auto Scaling__ + - ### Features + - Documentation updates for Amazon EC2 Auto Scaling + +# __2.13.22__ __2020-05-21__ +## __AWS CodeBuild__ + - ### Features + - CodeBuild adds support for tagging with report groups + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed an issue where a service returning an unknown response event type would cause a failure. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - From this release onwards ProvisionByoipCidr publicly supports IPv6. Updated ProvisionByoipCidr API to support tags for public IPv4 and IPv6 pools. Added NetworkBorderGroup to the DescribePublicIpv4Pools response. 
+ +## __Amazon Simple Storage Service__ + - ### Features + - Deprecates unusable input members bound to Content-MD5 header. Updates example and documentation. + +## __Synthetics__ + - ### Features + - AWS CloudWatch Synthetics now supports configuration of allocated memory for a canary. + +# __2.13.21__ __2020-05-20__ +## __AWS App Mesh__ + - ### Features + - List APIs for all resources now contain additional information: when a resource was created, last updated, and its current version number. + +## __AWS Backup__ + - ### Features + - This release allows customers to enable or disable AWS Backup support for an AWS resource type. This release also includes new APIs, update-region-settings and describe-region-settings, which can be used to opt in to a specific resource type. For all current AWS Backup customers, the default settings enable support for EBS, EC2, StorageGateway, EFS, DDB and RDS resource types. + +## __AWS CodeDeploy__ + - ### Features + - Amazon ECS customers using application and network load balancers can use CodeDeploy BlueGreen hook to invoke a CloudFormation stack update. With this update you can view CloudFormation deployment and target details via existing APIs and use your stack Id to list or delete all deployments associated with the stack. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports the ability to ingest the content that is streaming from an AWS Elemental Link device: https://aws.amazon.com/medialive/features/link/. This release also adds support for SMPTE-2038 and input state waiters. + +## __AWS SecurityHub__ + - ### Features + - For findings related to controls, the finding information now includes the reason behind the current status of the control. A new field for the findings original severity allows finding providers to use the severity values from the system they use to assign severity. 
+ +## __Amazon Chime__ + - ### Features + - Amazon Chime enterprise account administrators can now set custom retention policies on chat data in the Amazon Chime application. + +## __Amazon Transcribe Streaming Service__ + - ### Features + - This release adds support for vocabulary filtering in streaming with which you can filter unwanted words from the real-time transcription results. Visit https://docs.aws.amazon.com/transcribe/latest/dg/how-it-works.html to learn more. + +## __Application Auto Scaling__ + - ### Features + - Documentation updates for Application Auto Scaling + +# __2.13.20__ __2020-05-19__ +## __AWS Health APIs and Notifications__ + - ### Features + - Feature: Health: AWS Health added a new field to differentiate Public events from Account-Specific events in the API request and response. Visit https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html to learn more. + +## __AWS SDK for Java v2__ + - ### Features + - Endpoint discovery is now enabled by default for future services that will require it. A new method 'endpointDiscoveryEnabled' has been added to client builders that support endpoint discovery allowing a true or false value to be set. 'enableEndpointDiscovery' has been deprecated on the client builders as it is now superseded by 'endpointDiscoveryEnabled'. + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - You can now receive Voice Connector call events through SNS or SQS. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for Federated Authentication via SAML-2.0 in AWS ClientVPN. + +## __Amazon Transcribe Service__ + - ### Features + - Documentation updates for Amazon Transcribe. + +# __2.13.19__ __2020-05-18__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - Amazon Chime now supports redacting chat messages. 
+ +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for dynamodb + +## __Amazon EC2 Container Service__ + - ### Features + - This release adds support for specifying environment files to add environment variables to your containers. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release changes the RunInstances CLI and SDK's so that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency. + +## __Amazon Macie 2__ + - ### Features + - Documentation updates for Amazon Macie + +## __Amazon QLDB__ + - ### Features + - Amazon QLDB now supports Amazon Kinesis data streams. You can now emit QLDB journal data, via the new QLDB Streams feature, directly to Amazon Kinesis supporting event processing and analytics among related use cases. + +# __2.13.18__ __2020-05-15__ +## __AWS CloudFormation__ + - ### Features + - This release adds support for the following features: 1. DescribeType and ListTypeVersions APIs now output a field IsDefaultVersion, indicating if a version is the default version for its type; 2. Add StackRollbackComplete waiter feature to wait until stack status is UPDATE_ROLLBACK_COMPLETE; 3. Add paginators in DescribeAccountLimits, ListChangeSets, ListStackInstances, ListStackSetOperationResults, ListStackSetOperations, ListStackSets APIs. + +## __AWS Glue__ + - ### Features + - Starting today, you can stop the execution of Glue workflows that are running. AWS Glue workflows are directed acyclic graphs (DAGs) of Glue triggers, crawlers and jobs. Using a workflow, you can design a complex multi-job extract, transform, and load (ETL) activity that AWS Glue can execute and track as single entity. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Allow event structures to be used as operation outputs outside of streaming contexts. 
+ - Fix generation for services that contain operations with the same name as the service. + +## __AWS Security Token Service__ + - ### Features + - API updates for STS + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for specifying an image manifest media type when pushing a manifest to Amazon ECR. + +# __2.13.17__ __2020-05-14__ +## __AWS SDK for Java v2__ + - ### Features + - Expose the `extendedRequestId` from `SdkServiceException`, so it can be provided to support to investigate issues. + - Updated service endpoint metadata. + + - ### Bugfixes + - Fix generation for operations that share an output shape. + - Fix unmarshalling of events when structure member name and shape name mismatch. + - Support event streams that are shared between two operations. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now supports adding AWS resource tags for associations between VPCs and local gateways, at creation time. + +## __Amazon RDS__ + - ### Features + - Add SourceRegion to CopyDBClusterSnapshot and CreateDBCluster operations. As with CopyDBSnapshot and CreateDBInstanceReadReplica, specifying this field will automatically populate the PresignedURL field with a valid value. + +## __EC2 Image Builder__ + - ### Features + - This release adds a new parameter (SupportedOsVersions) to the Components API. This parameter lists the OS versions supported by a component. + +# __2.13.16__ __2020-05-13__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix a race condition in `FileAsyncResponseTransformer` where the future fails to complete when onComplete event is dispatched on the same thread that executed request + +## __Amazon ElastiCache__ + - ### Features + - Amazon ElastiCache now supports auto-update of ElastiCache clusters after the "recommended apply by date" of service update has passed. ElastiCache will use your maintenance window to schedule the auto-update of applicable clusters. 
For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html + +## __Amazon Macie 2__ + - ### Features + - This release introduces a new major version of the Amazon Macie API. You can use this version of the API to develop tools and applications that interact with the new Amazon Macie. + +# __2.13.15__ __2020-05-12__ +## __AWS IoT SiteWise__ + - ### Features + - Documentation updates for iot-bifrost + +## __Amazon WorkMail__ + - ### Features + - Minor API fixes and updates to the documentation. + +# __2.13.14__ __2020-05-11__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWSKendraFrontendService__ + - ### Features + - Amazon Kendra is now generally available. As part of general availability, we are launching Metrics for query & storage utilization + +## __Amazon CodeGuru Reviewer__ + - ### Features + - Add Bitbucket integration APIs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - M6g instances are our next-generation general purpose instances powered by AWS Graviton2 processors + +# __2.13.13__ __2020-05-08__ +## __AWS Resource Groups Tagging API__ + - ### Features + - Documentation updates for resourcegroupstaggingapi + +## __AWS SDK for Java v2__ + - ### Features + - A helpful error message is now raised when an obviously-invalid region name is given to the SDK, instead of the previous NullPointerException. Fixes [#1642](https://github.com/aws/aws-sdk-java-v2/issues/1642). + - Updated service endpoint metadata. + +## __Amazon GuardDuty__ + - ### Features + - Documentation updates for GuardDuty + +## __Amazon SageMaker Service__ + - ### Features + - This release adds a new parameter (EnableInterContainerTrafficEncryption) to CreateProcessingJob API to allow for enabling inter-container traffic encryption on processing jobs. 
+ +# __2.13.12__ __2020-05-07__ +## __AWS CodeBuild__ + - ### Features + - Add COMMIT_MESSAGE enum for webhook filter types + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon AppConfig__ + - ### Features + - The description of the AWS AppConfig GetConfiguration API action was amended to include important information about calling ClientConfigurationVersion when you configure clients to call GetConfiguration. + +## __Amazon CloudWatch Logs__ + - ### Features + - Amazon CloudWatch Logs now offers the ability to interact with Logs Insights queries via the new PutQueryDefinition, DescribeQueryDefinitions, and DeleteQueryDefinition APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now adds warnings to identify issues when creating a launch template or launch template version. + +## __Amazon Lightsail__ + - ### Features + - This release adds support for the following options in instance public ports: Specify source IP addresses, specify ICMP protocol like PING, and enable/disable the Lightsail browser-based SSH and RDP clients' access to your instance. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the EU (Milan) Region (eu-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - This Patch Manager release supports creating patch baselines for Oracle Linux and Debian + +# __2.13.11__ __2020-05-06__ +## __AWS CodeStar connections__ + - ### Features + - Added support for tagging resources in AWS CodeStar Connections + +## __AWS Comprehend Medical__ + - ### Features + - New Batch Ontology APIs for ICD-10 and RxNorm will provide batch capability of linking the information extracted by Comprehend Medical to medical ontologies. 
The new ontology linking APIs make it easy to detect medications and medical conditions in unstructured clinical text and link them to RxNorm and ICD-10-CM codes respectively. This new feature can help you reduce the cost, time and effort of processing large amounts of unstructured medical text with high accuracy. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +# __2.13.10__ __2020-05-05__ +## __AWS SDK for Java v2__ + - ### Features + - Updating dependency version: Jackson 2.10.3 -> 2.10.4, and combine dependency Jackson-annotations with Jackson. + +## __AWS Support__ + - ### Features + - Documentation updates for support + +## __Amazon DynamoDB__ + - ### Bugfixes + - Tweaked the javadocs for Get/Update, since it was previously wrongfully copied over from Delete and mentions the "delete operation". + +## __Amazon Elastic Compute Cloud__ + - ### Features + - With this release, you can call ModifySubnetAttribute with two new parameters: MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool, to map a customerOwnedIpv4Pool to a subnet. You will also see these two new fields in the DescribeSubnets response. If your subnet has a customerOwnedIpv4Pool mapped, your network interface will get an auto assigned customerOwnedIpv4 address when placed onto an instance. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - AWS Systems Manager Parameter Store launches new data type to support aliases in EC2 APIs + +# __2.13.9__ __2020-05-04__ +## __AWS S3 Control__ + - ### Features + - Amazon S3 Batch Operations now supports Object Lock. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Bugfix for handling special characters ':' and '#' in attribute names + +## __Amazon Elastic Compute Cloud__ + - ### Features + - With this release, you can include enriched metadata in Amazon Virtual Private Cloud (Amazon VPC) flow logs published to Amazon CloudWatch Logs or Amazon Simple Storage Service (S3). Prior to this, custom format VPC flow logs enriched with additional metadata could be published only to S3. With this launch, we are also adding additional metadata fields that provide insights about the location such as AWS Region, AWS Availability Zone, AWS Local Zone, AWS Wavelength Zone, or AWS Outpost where the network interface where flow logs are captured exists. + +# __2.13.8__ __2020-05-01__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Elastic File System__ + - ### Features + - Change the TagKeys argument for UntagResource to a URL parameter to address an issue with the Java and .NET SDKs. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Added TimeoutSeconds as part of ListCommands API response. + +# __2.13.7__ __2020-04-30__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK has added support for including AFD signaling in MXF wrapper. + +## __AWS IoT__ + - ### Features + - AWS IoT Core released Fleet Provisioning for scalable onboarding of IoT devices to the cloud. This release includes support for customer's Lambda functions to validate devices during onboarding. Fleet Provisioning also allows devices to send Certificate Signing Requests (CSR) to AWS IoT Core for signing and getting a unique certificate. Lastly, AWS IoT Core added a feature to register the same certificate for multiple accounts in the same region without needing to register the certificate authority (CA). 
+ +## __AWS IoT Events__ + - ### Features + - Doc only update to correct APIs and related descriptions + +## __AWS Lambda__ + - ### Features + - Documentation updates for Lambda + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Adding support for S3_INTELLIGENT_TIERING as a storage class option + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Made OperationContext a public interface and moved it into public namespace as it was already exposed through another public interface. This will only impact extensions that have been written to reference the old internal-only class that should now switch to the approved stable public interface. + +## __Schemas__ + - ### Features + - Add support for resource policies for Amazon EventBridge Schema Registry, which is now generally available. + +# __2.13.6__ __2020-04-29__ +## __AWS Cloud Map__ + - ### Features + - Documentation updates for servicediscovery + +## __AWS IoT SiteWise__ + - ### Features + - AWS IoT SiteWise is a managed service that makes it easy to collect, store, organize and monitor data from industrial equipment at scale. You can use AWS IoT SiteWise to model your physical assets, processes and facilities, quickly compute common industrial performance metrics, and create fully managed web applications to help analyze industrial equipment data, prevent costly equipment issues, and reduce production inefficiencies. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS WAF__ + - ### Features + - This release adds migration API for AWS WAF Classic ("waf" and "waf-regional"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF ("wafv2"). 
+ +## __AWS WAF Regional__ + - ### Features + - This release adds migration API for AWS WAF Classic ("waf" and "waf-regional"). The migration API will parse through your web ACL and generate a CloudFormation template into your S3 bucket. Deploying this template will create equivalent web ACL under new AWS WAF ("wafv2"). + +## __Amazon Transcribe Service__ + - ### Features + - With this release, you can now use Amazon Transcribe to create medical custom vocabularies and use them in both medical real-time streaming and medical batch transcription jobs. + +# __2.13.5__ __2020-04-28__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports several new features: enhanced VQ for H.264 (AVC) output encodes; passthrough of timed metadata and of Nielsen ID3 metadata in fMP4 containers in HLS outputs; the ability to generate a SCTE-35 sparse track without additional segmentation, in Microsoft Smooth outputs; the ability to select the audio from a TS input by specifying the audio track; and conversion of HDR colorspace in the input to an SDR colorspace in the output. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced__ + - ### Bugfixes + - Fix NPE on EnhancedType, created with documentOf, when calling innerToString + +## __Amazon EC2 Container Registry__ + - ### Features + - This release adds support for multi-architecture images also known as a manifest list + +## __Amazon Kinesis Video Streams__ + - ### Features + - Add "GET_CLIP" to the list of supported API names for the GetDataEndpoint API. + +## __Amazon Kinesis Video Streams Archived Media__ + - ### Features + - Add support for the GetClip API for retrieving media from a video stream in the MP4 format. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the Africa (Cape Town) Region (af-south-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. 
+ +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - SSM State Manager support for adding list association filter for Resource Group and manual mode of managing compliance for an association. + +# __2.13.4__ __2020-04-27__ +## __AWS Data Exchange__ + - ### Features + - This release introduces AWS Data Exchange support for configurable encryption parameters when exporting data sets to Amazon S3. + +## __AWS Database Migration Service__ + - ### Features + - Adding minimum replication engine version for describe-endpoint-types api. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + - Various performance improvements. + +## __Access Analyzer__ + - ### Features + - This release adds support for inclusion of S3 Access Point policies in IAM Access Analyzer evaluation of S3 bucket access. IAM Access Analyzer now reports findings for buckets shared through access points and identifies the access point that permits access. + +## __Amazon SageMaker Service__ + - ### Features + - Change to the input, ResourceSpec, changing EnvironmentArn to SageMakerImageArn. This affects the following preview APIs: CreateDomain, DescribeDomain, UpdateDomain, CreateUserProfile, DescribeUserProfile, UpdateUserProfile, CreateApp and DescribeApp. + +# __2.13.3__ __2020-04-24__ +## __AWS IoT__ + - ### Features + - This release adds a new exception type to the AWS IoT SetV2LoggingLevel API. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + + - ### Bugfixes + - Fixed bean-style setter names on serializable builders to match bean-style getter names. + +## __Amazon Data Lifecycle Manager__ + - ### Features + - Enable 1hour frequency in the schedule creation for Data LifeCycle Manager. + +## __Amazon Elastic Inference__ + - ### Features + - This feature allows customers to describe the accelerator types and offerings on any region where Elastic Inference is available. 
+ +# __2.13.2__ __2020-04-23__ +## __AWS Elemental MediaPackage VOD__ + - ### Features + - Adds tagging support for PackagingGroups, PackagingConfigurations, and Assets + +## __AWS Resource Access Manager__ + - ### Features + - AWS Resource Access Manager (RAM) provides a new ListResourceTypes action. This action lets you list the resource types that can be shared using AWS RAM. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS Storage Gateway__ + - ### Features + - Added AutomaticTapeCreation APIs + +## __AWS Transfer Family__ + - ### Features + - This release adds support for transfers over FTPS and FTP in and out of Amazon S3, which makes it easy to migrate File Transfer Protocol over SSL (FTPS) and FTP workloads to AWS, in addition to the existing support for Secure File Transfer Protocol (SFTP). + +## __Amazon Kinesis Firehose__ + - ### Features + - You can now deliver streaming data to an Amazon Elasticsearch Service domain in an Amazon VPC. You can now compress streaming data delivered to S3 using Hadoop-Snappy in addition to Gzip, Zip and Snappy formats. + +## __Amazon Pinpoint__ + - ### Features + - This release of the Amazon Pinpoint API enhances support for sending campaigns through custom channels to locations such as AWS Lambda functions or web applications. Campaigns can now use CustomDeliveryConfiguration and CampaignCustomMessage to configure custom channel settings for a campaign. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for AWS Local Zones, including a new optional parameter AvailabilityZoneGroup for the DescribeOrderableDBInstanceOptions operation. + +## __Application Auto Scaling__ + - ### Features + - This release supports Auto Scaling in Amazon Keyspaces for Apache Cassandra. + +# __2.13.1__ __2020-04-22__ +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon CodeGuru Reviewer__ + - ### Features + - Add support for code review and recommendation feedback APIs. + +## __Amazon Elasticsearch Service__ + - ### Features + - This change adds a new field 'OptionalDeployment' to ServiceSoftwareOptions to indicate whether a service software update is optional or mandatory. If True, it indicates that the update is optional, and the service software is not automatically updated. If False, the service software is automatically updated after AutomatedUpdateDate. + +## __Amazon Redshift__ + - ### Features + - Amazon Redshift support for usage limits + +## __Amazon Transcribe Streaming Service__ + - ### Features + - Adding ServiceUnavailableException as one of the expected exceptions + +## __Firewall Management Service__ + - ### Features + - This release is to support AWS Firewall Manager policy with Organizational Unit scope. + +# __2.13.0__ __2020-04-21__ +## __AWS Cost Explorer Service__ + - ### Features + - Cost Explorer Rightsizing Recommendations integrates with Compute Optimizer and begins offering across instance family rightsizing recommendations, adding to existing support for within instance family rightsizing recommendations. + +## __AWS SDK for Java v2__ + - ### Features + - Bump minor version to '2.13.0-SNAPSHOT' because of upgrade of Jackson version. + - Updated service endpoint metadata. + - Updating dependency version: Jackson 2.10.0 -> 2.10.3, Jackson-annotations 2.9.0 -> 2.10.0. + +## __Amazon Elastic MapReduce__ + - ### Features + - Amazon EMR adds support for configuring a managed scaling policy for an Amazon EMR cluster. This enables automatic resizing of a cluster to optimize for job execution speed and reduced cluster cost. + +## __Amazon GuardDuty__ + - ### Features + - AWS GuardDuty now supports using AWS Organizations delegated administrators to create and manage GuardDuty master and member accounts. 
The feature also allows GuardDuty to be automatically enabled on associated organization accounts. + +## __Amazon Route 53 Domains__ + - ### Features + - You can now programmatically transfer domains between AWS accounts without having to contact AWS Support + +# __2.12.0__ __2020-04-20__ +## __AWS Cost Explorer Service__ + - ### Features + - Cost Categories API is now General Available with new dimensions and operations support. You can map costs by account name, service, and charge type dimensions as well as use contains, starts with, and ends with operations. Cost Categories can also be used in RI and SP coverage reports. + +## __AWS Glue__ + - ### Features + - Added a new ConnectionType "KAFKA" and a ConnectionProperty "KAFKA_BOOTSTRAP_SERVERS" to support Kafka connection. + +## __AWS IoT Events__ + - ### Features + - API update that allows users to add AWS Iot SiteWise actions while creating Detector Model in AWS Iot Events + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon DynamoDB Enhanced Client__ + - ### Features + - The Amazon DynamoDB Enhanced Client is now generally available and provides a natural and intuitive interface for developers to integrate their applications with Amazon DynamoDB by means of an adaptive API that will map inputs and results to and from Java objects modeled by the application, rather than requiring the developers to implement that transformation themselves. + +## __AmazonApiGatewayV2__ + - ### Features + - You can now export an OpenAPI 3.0 compliant API definition file for Amazon API Gateway HTTP APIs using the Export API. + +## __Synthetics__ + - ### Features + - Introducing CloudWatch Synthetics. This is the first public release of CloudWatch Synthetics. + +# __2.11.14__ __2020-04-17__ +## __AWS OpsWorks CM__ + - ### Features + - Documentation updates for opsworkscm + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. 
+ +## __Amazon Fraud Detector__ + - ### Features + - Added support for a new rule engine execution mode. Customers will be able to configure their detector versions to evaluate all rules and return outcomes from all 'matched' rules in the GetPrediction API response. Added support for deleting Detectors (DeleteDetector) and Rule Versions (DeleteRuleVersion). + +# __2.11.13__ __2020-04-16__ +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert now allows you to specify your input captions frame rate for SCC captions sources. + +## __AWS Glue__ + - ### Features + - This release adds support for querying GetUserDefinedFunctions API without databaseName. + +## __AWS IoT Events__ + - ### Features + - API update that allows users to customize event action payloads, and adds support for Amazon DynamoDB actions. + +## __AWS Lambda__ + - ### Features + - Sample code for AWS Lambda operations + +## __AWS MediaTailor__ + - ### Features + - AWS Elemental MediaTailor SDK now allows configuration of Avail Suppression. + +## __AWS Migration Hub__ + - ### Features + - Adding ThrottlingException + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __AWS SecurityHub__ + - ### Features + - Added a new BatchUpdateFindings action, which allows customers to update selected information about their findings. Security Hub customers use BatchUpdateFindings to track their investigation into a finding. BatchUpdateFindings is intended to replace the UpdateFindings action, which is deprecated. + +## __Amazon Augmented AI Runtime__ + - ### Features + - This release updates Amazon Augmented AI ListHumanLoops and StartHumanLoop APIs. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 now supports adding AWS resource tags for placement groups and key pairs, at creation time. The CreatePlacementGroup API will now return placement group information when created successfully. 
The DeleteKeyPair API now supports deletion by resource ID. + +## __Amazon Import/Export Snowball__ + - ### Features + - An update to the Snowball Edge Storage Optimized device has been launched. Like the previous version, it has 80 TB of capacity for data transfer. Now it has 40 vCPUs, 80 GiB, and a 1 TiB SATA SSD of memory for EC2 compatible compute. The 80 TB of capacity can also be used for EBS-like volumes for AMIs. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for Amazon RDS Proxy with PostgreSQL compatibility. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports running training jobs on ml.g4dn and ml.c5n instance types. Amazon SageMaker supports in "IN" operation for Search now. + +## __EC2 Image Builder__ + - ### Features + - This release includes support for additional OS Versions within EC2 Image Builder. + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Mark a connection as unreusable if there was a 5xx server error so that a new request will establish a new connection. + +# __2.11.12__ __2020-04-08__ +## __AWS CloudFormation__ + - ### Features + - The OrganizationalUnitIds parameter on StackSet and the OrganizationalUnitId parameter on StackInstance, StackInstanceSummary, and StackSetOperationResultSummary are now reserved for internal use. No data is returned for this parameter. + +## __AWS Elemental MediaConvert__ + - ### Features + - AWS Elemental MediaConvert SDK adds support for queue hopping. Jobs can now hop from their original queue to a specified alternate queue, based on the maximum wait time that you specify in the job settings. + +## __AWS Migration Hub Config__ + - ### Features + - Adding ThrottlingException + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - feature: Chime: This release introduces the ability to tag Amazon Chime SDK meeting resources. 
You can use tags to organize and identify your resources for cost allocation. + +## __Amazon CodeGuru Profiler__ + - ### Features + - CodeGuruProfiler adds support for resource based authorization to submit profile data. + +## __Amazon EC2 Container Service__ + - ### Features + - This release provides native support for specifying Amazon EFS file systems as volumes in your Amazon ECS task definitions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release provides the ability to include tags in EC2 event notifications. + +# __2.11.11__ __2020-04-07__ +## __AWS MediaConnect__ + - ### Features + - You can now send content from your MediaConnect flow to your virtual private cloud (VPC) without going over the public internet. + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway. + +## __Amazon CodeGuru Reviewer__ + - ### Features + - API updates for CodeGuruReviewer + +# __2.11.10__ __2020-04-06__ +## __AWS Elastic Beanstalk__ + - ### Features + - This release adds a new action, ListPlatformBranches, and updates two actions, ListPlatformVersions and DescribePlatformVersion, to support the concept of Elastic Beanstalk platform branches. + +## __AWS Identity and Access Management__ + - ### Features + - Documentation updates for AWS Identity and Access Management (IAM). + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Chime__ + - ### Features + - Amazon Chime proxy phone sessions let you provide two users with a shared phone number to communicate via voice or text for up to 12 hours without revealing personal phone numbers. When users call or message the provided phone number, they are connected to the other party and their private phone numbers are replaced with the shared number in Caller ID. + +## __Amazon Transcribe Service__ + - ### Features + - This release adds support for batch transcription jobs within Amazon Transcribe Medical. 
+ +# __2.11.9__ __2020-04-03__ +## __AWS RoboMaker__ + - ### Features + - Added support for limiting simulation unit usage, giving more predictable control over simulation cost + +## __AWS S3__ + - ### Features + - Allow DefaultS3Presigner.Builder to take a custom S3Configuration + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon Personalize Runtime__ + - ### Features + - Amazon Personalize: Add new response field "score" to each item returned by GetRecommendations and GetPersonalizedRanking (HRNN-based recipes only) + +# __2.11.8__ __2020-04-02__ +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental MediaLive now supports Automatic Input Failover. This feature provides resiliency upstream of the channel, before ingest starts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated service endpoint metadata. + +## __Amazon CloudWatch__ + - ### Features + - Amazon CloudWatch Contributor Insights adds support for tags and tagging on resource creation. + +## __Amazon GameLift__ + - ### Features + - Public preview of GameLift FleetIQ as a standalone feature. GameLift FleetIQ makes it possible to use low-cost Spot instances by limiting the chance of interruptions affecting game sessions. FleetIQ is a feature of the managed GameLift service, and can now be used with game hosting in EC2 Auto Scaling groups that you manage in your own account. 
+ +## __Amazon Redshift__ + - ### Features + - Documentation updates for redshift + +## __Amazon Relational Database Service__ + - ### Features + - Documentation updates for RDS: creating read replicas is now supported for SQL Server DB instances + # __2.11.7__ __2020-04-01__ ## __AWS IoT__ - ### Features diff --git a/README.md b/README.md index 9eff33ae8e04..440c63801185 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.11.7 + 2.13.55 pom import @@ -83,12 +83,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.11.7 + 2.13.55 software.amazon.awssdk s3 - 2.11.7 + 2.13.55 ``` @@ -100,7 +100,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.11.7 + 2.13.55 ``` diff --git a/archetypes/archetype-lambda/README.md b/archetypes/archetype-lambda/README.md index 1992b020ddfa..5702a787b739 100755 --- a/archetypes/archetype-lambda/README.md +++ b/archetypes/archetype-lambda/README.md @@ -14,7 +14,7 @@ You can use `mvn archetype:generate` to generate a project using this archetype. 
mvn archetype:generate \ -DarchetypeGroupId=software.amazon.awssdk \ -DarchetypeArtifactId=archetype-lambda \ - -DarchetypeVersion=2.x\ + -DarchetypeVersion=2.x ``` - Batch mode @@ -23,12 +23,11 @@ mvn archetype:generate \ mvn archetype:generate \ -DarchetypeGroupId=software.amazon.awssdk \ -DarchetypeArtifactId=archetype-lambda \ - -DarchetypeVersion=2.x\ + -DarchetypeVersion=2.x \ -DgroupId=com.test \ -DartifactId=sample-project \ -Dservice=s3 \ - -Dregion=us-west-2 \ - -DinteractiveMode=false \ + -DinteractiveMode=false ``` ### Parameters @@ -36,9 +35,9 @@ mvn archetype:generate \ Parameter Name | Default Value | Description ---|---|--- `service` (required) | n/a | Specifies the service client to be used in the lambda function, eg: s3, dynamodb. You can find available services [here][java-sdk-v2-services]. -`region` (required) | n/a | Specifies the region to be set for the SDK client in the application `groupId`(required) | n/a | Specifies the group ID of the project `artifactId`(required) | n/a | Specifies the artifact ID of the project +`region` | n/a | Specifies the region to be set for the SDK client in the application `httpClient` | url-connection-client | Specifies the http client to be used by the SDK client. Available options are `url-connection-client` (sync), `apache-client` (sync), `netty-nio-client` (async). See [http clients][sdk-http-clients] `handlerClassName` | `"App"`| Specifies the class name of the handler, which will be used as the lambda function name. It should be camel case. 
`javaSdkVersion` | Same version as the archetype version | Specifies the version of the AWS Java SDK 2.x to be used diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 1c9d0fb32fd1..f6bf8cdc63ad 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml b/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml index cae4983caaf1..82f7c91a528c 100644 --- a/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml +++ b/archetypes/archetype-lambda/src/main/resources/META-INF/maven/archetype-metadata.xml @@ -39,6 +39,7 @@ (url-connection-client|apache-client|netty-nio-client) + null ^\w+-(\w+-)+\d+$ diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml b/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml index 035954af7d98..b51f5f667e6a 100644 --- a/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/pom.xml @@ -11,7 +11,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 ${javaSdkVersion} @@ -93,6 +93,15 @@ false ${artifactId} + + + *:* + + + module-info.class + + + diff --git a/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java index f9f4ed2f2e3b..947a0c4d4d03 100644 --- a/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java +++ b/archetypes/archetype-lambda/src/main/resources/archetype-resources/src/main/java/DependencyFactory.java @@ -3,6 +3,9 @@ package ${package}; import 
software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +#if ($region == 'null') +import software.amazon.awssdk.core.SdkSystemSetting; +#end import software.amazon.awssdk.http.${httpClientPackageName}; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.${servicePackage}.${serviceClientClassName}; @@ -20,7 +23,11 @@ private DependencyFactory() {} public static ${serviceClientClassName} ${serviceClientVariable}Client() { return ${serviceClientClassName}.builder() .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) +#if ($region == 'null') + .region(Region.of(System.getenv(SdkSystemSetting.AWS_REGION.environmentVariable()))) +#else .region(Region.${regionEnum}) +#end .httpClientBuilder(${httpClientClassName}.builder()) .build(); } diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties index 682c1bdf1f84..63a575c729e8 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/archetype.properties @@ -5,6 +5,6 @@ package=software.amazonaws.test service=dynamodb httpClient=apache-client handlerClassName=MyApacheFunction -region=ap-southeast-1 +region=null javaSdkVersion=2.11.0 nettyOpenSslVersion=2.0.29.Final \ No newline at end of file diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml index 2f1c10b9e1e4..d34684ea4c6d 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/pom.xml @@ -10,7 +10,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 2.11.0 
@@ -80,6 +80,15 @@ false test-apache-artifact + + + *:* + + + module-info.class + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java index f6ecbd48a31f..f79a15985d7e 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java +++ b/archetypes/archetype-lambda/src/test/resources/projects/apachehttpclient/reference/src/main/java/software/amazonaws/test/DependencyFactory.java @@ -2,6 +2,7 @@ package software.amazonaws.test; import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; @@ -19,7 +20,7 @@ private DependencyFactory() {} public static DynamoDbClient dynamoDbClient() { return DynamoDbClient.builder() .credentialsProvider(EnvironmentVariableCredentialsProvider.create()) - .region(Region.AP_SOUTHEAST_1) + .region(Region.of(System.getenv(SdkSystemSetting.AWS_REGION.environmentVariable()))) .httpClientBuilder(ApacheHttpClient.builder()) .build(); } diff --git a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml index fa7e7cce3210..e434477f8aea 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml +++ b/archetypes/archetype-lambda/src/test/resources/projects/dynamodbstreamsclient/reference/pom.xml @@ -10,7 +10,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 2.11.0 @@ -80,6 
+80,15 @@ false test-dynamodbstreams-artifact + + + *:* + + + module-info.class + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml index 003cb97c99c7..177dfbb0440b 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml +++ b/archetypes/archetype-lambda/src/test/resources/projects/nettyclient/reference/pom.xml @@ -10,7 +10,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 2.11.0 @@ -88,6 +88,15 @@ false test-netty-artifact + + + *:* + + + module-info.class + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml index addcc8788bcb..510579f12687 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml +++ b/archetypes/archetype-lambda/src/test/resources/projects/urlhttpclient/reference/pom.xml @@ -10,7 +10,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 2.11.0 @@ -80,6 +80,15 @@ false test-url-connection-client-artifact + + + *:* + + + module-info.class + + + diff --git a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml index 68fe5c93a944..9394f5abb2cc 100644 --- a/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml +++ b/archetypes/archetype-lambda/src/test/resources/projects/wafregionalclient/reference/pom.xml @@ -10,7 +10,7 @@ UTF-8 1.8 1.8 - 3.1.1 + 3.2.1 3.6.1 1.6.0 2.11.0 @@ -80,6 +80,15 @@ false test-wafregional-artifact + + + *:* + + + module-info.class + + + diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 5fe7d6bf16d5..0d41d8e134de 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ 
aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 129ce54bc04c..405959c70b1b 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -12,14 +12,12 @@ ~ on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ~ express or implied. See the License for the specific language governing ~ permissions and limitations under the License. - --> - - + --> 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../pom.xml aws-sdk-java @@ -1110,6 +1108,36 @@ Amazon AutoScaling, etc). codestarconnections ${awsjavasdk.version} + + software.amazon.awssdk + synthetics + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsitewise + ${awsjavasdk.version} + + + software.amazon.awssdk + macie2 + ${awsjavasdk.version} + + + software.amazon.awssdk + codeartifact + ${awsjavasdk.version} + + + software.amazon.awssdk + honeycode + ${awsjavasdk.version} + + + software.amazon.awssdk + ivs + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index d2976ddca618..9f3d2d7e3fe4 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 @@ -62,7 +62,7 @@ com.fasterxml.jackson.core jackson-annotations - ${jackson.annotations.version} + ${jackson.version} com.fasterxml.jackson.dataformat diff --git a/bom/pom.xml b/bom/pom.xml index 300d7c1345c3..230c183e7698 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -12,14 +12,12 @@ ~ on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ~ express or implied. See the License for the specific language governing ~ permissions and limitations under the License. 
- --> - - + --> 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../pom.xml bom @@ -420,6 +418,11 @@ dynamodb ${awsjavasdk.version} + + software.amazon.awssdk + dynamodb-enhanced + ${awsjavasdk.version} + software.amazon.awssdk ec2 @@ -1225,6 +1228,36 @@ codestarconnections ${awsjavasdk.version} + + software.amazon.awssdk + synthetics + ${awsjavasdk.version} + + + software.amazon.awssdk + iotsitewise + ${awsjavasdk.version} + + + software.amazon.awssdk + macie2 + ${awsjavasdk.version} + + + software.amazon.awssdk + codeartifact + ${awsjavasdk.version} + + + software.amazon.awssdk + honeycode + ${awsjavasdk.version} + + + software.amazon.awssdk + ivs + ${awsjavasdk.version} + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml index 1d42ef9f8f7f..05001c3fba7b 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml @@ -365,6 +365,14 @@ + + + + + + + + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml index d1809a003d7d..82126f1a1f92 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml @@ -159,4 +159,22 @@ + + + + + + + + + + + + + + + + + + diff --git a/bundle/pom.xml b/bundle/pom.xml index 3991c99473b2..5ec90da94bd6 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 91ff930aac78..c8dbbb5360d3 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ 
software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 5ffe896a7870..f9ec817b4537 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index fe5a1c814810..b5151194df89 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index 53ac4ed537cb..4a6a888ff2ee 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codegen AWS Java SDK :: Code Generator @@ -57,6 +57,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk regions diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyInputShape.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyInputShape.java index 172fcbe137cd..3b1c95677796 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyInputShape.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyInputShape.java @@ -15,7 +15,6 @@ package software.amazon.awssdk.codegen; -import static software.amazon.awssdk.codegen.internal.Constant.REQUEST_CLASS_SUFFIX; import static software.amazon.awssdk.codegen.internal.Utils.createInputShapeMarshaller; import static software.amazon.awssdk.codegen.internal.Utils.unCapitalize; @@ -62,7 +61,7 @@ private Map addEmptyInputShapes( Input input = operation.getInput(); if (input == null) { - String inputShape = operationName + REQUEST_CLASS_SUFFIX; + String inputShape 
= namingStrategy.getRequestClassName(operationName); OperationModel operationModel = javaOperationMap.get(operationName); operationModel.setInput(new VariableModel(unCapitalize(inputShape), inputShape)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyOutputShape.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyOutputShape.java index 78ed00857022..8a9d587ace15 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyOutputShape.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddEmptyOutputShape.java @@ -15,8 +15,6 @@ package software.amazon.awssdk.codegen; -import static software.amazon.awssdk.codegen.internal.Constant.RESPONSE_CLASS_SUFFIX; - import java.util.HashMap; import java.util.Map; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; @@ -58,7 +56,7 @@ private Map addEmptyOutputShapes( Output output = operation.getOutput(); if (output == null) { - String outputShape = operationName + RESPONSE_CLASS_SUFFIX; + String outputShape = namingStrategy.getResponseClassName(operationName); OperationModel operationModel = currentOperations.get(operationName); operationModel.setReturnType(new ReturnTypeModel(outputShape)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java index 565d567cb7c9..f0ded1cd6e17 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java @@ -106,13 +106,25 @@ public IntermediateModel build() { Map authorizers = new HashMap<>(new AddCustomAuthorizers(this.service, getNamingStrategy()).constructAuthorizers()); + // Iterate through every operation and build an 'endpointOperation' if at least one operation that supports + // endpoint discovery is found. 
If -any operations that require- endpoint discovery are found, then the flag + // 'endpointCacheRequired' will be set on the 'endpointOperation'. This 'endpointOperation' summary is then + // passed directly into the constructor of the intermediate model and is referred to by the codegen. OperationModel endpointOperation = null; + boolean endpointCacheRequired = false; for (OperationModel o : operations.values()) { if (o.isEndpointOperation()) { endpointOperation = o; - break; } + + if (o.getEndpointDiscovery() != null && o.getEndpointDiscovery().isRequired()) { + endpointCacheRequired = true; + } + } + + if (endpointOperation != null) { + endpointOperation.setEndpointCacheRequired(endpointCacheRequired); } for (IntermediateModelShapeProcessor processor : shapeProcessors) { @@ -168,8 +180,7 @@ private void linkMembersToShapes(IntermediateModel model) { for (Map.Entry entry : model.getShapes().entrySet()) { if (entry.getValue().getMembers() != null) { for (MemberModel member : entry.getValue().getMembers()) { - member.setShape( - Utils.findShapeModelByC2jNameIfExists(model, member.getC2jShape())); + member.setShape(Utils.findMemberShapeModelByC2jNameIfExists(model, member.getC2jShape())); } } } @@ -186,7 +197,9 @@ private void linkOperationsToInputOutputShapes(IntermediateModel model) { if (operation.getOutput() != null) { String outputShapeName = operation.getOutput().getShape(); - entry.getValue().setOutputShape(model.getShapeByC2jName(outputShapeName)); + ShapeModel outputShape = + model.getShapeByNameAndC2jName(entry.getValue().getReturnType().getReturnType(), outputShapeName); + entry.getValue().setOutputShape(outputShape); } } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java index f99859b8a019..65dc3346fdda 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.model.service.PaginatorDefinition; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringUtils; @@ -58,7 +59,7 @@ abstract class OperationDocProvider { this.model = model; this.opModel = opModel; this.config = config; - this.paginationDocs = new PaginationDocs(model, opModel); + this.paginationDocs = new PaginationDocs(model, opModel, getPaginatorDefinition()); } /** @@ -180,6 +181,10 @@ final void emitRequestParm(DocumentationBuilder docBuilder) { } } + private PaginatorDefinition getPaginatorDefinition() { + return model.getPaginators().get(opModel.getOperationName()); + } + /** * @return The interface name of the client. Will differ per {@link ClientType}. 
*/ diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/PaginationDocs.java b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/PaginationDocs.java index 67738c7ba8b7..485b034fbe32 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/PaginationDocs.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/PaginationDocs.java @@ -23,6 +23,7 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.codegen.model.service.PaginatorDefinition; import software.amazon.awssdk.codegen.poet.PoetExtensions; import software.amazon.awssdk.codegen.utils.PaginatorUtils; import software.amazon.awssdk.utils.async.SequentialSubscriber; @@ -33,10 +34,13 @@ public class PaginationDocs { private final OperationModel operationModel; private final PoetExtensions poetExtensions; + private final PaginatorDefinition paginatorDefinition; - public PaginationDocs(IntermediateModel intermediateModel, OperationModel operationModel) { + public PaginationDocs(IntermediateModel intermediateModel, OperationModel operationModel, + PaginatorDefinition paginatorDefinition) { this.operationModel = operationModel; this.poetExtensions = new PoetExtensions(intermediateModel); + this.paginatorDefinition = paginatorDefinition; } /** @@ -149,6 +153,7 @@ private String getSyncCodeSnippets() { .add(callOperationOnClient) .addStatement("responses.iterator().forEachRemaining(....)") .build())) + .add(noteAboutLimitConfigurationMethod()) .add(noteAboutSyncNonPaginatedMethod()) .build() .toString(); @@ -186,6 +191,7 @@ private String getAsyncCodeSnippets() { .build())) .add("As the response is a publisher, it can work well with third party reactive streams implementations " + "like RxJava2.") + .add(noteAboutLimitConfigurationMethod()) .add(noteAboutSyncNonPaginatedMethod()) .build() .toString(); 
@@ -238,6 +244,18 @@ private ClassName asyncPaginatedResponseType() { return poetExtensions.getResponseClassForPaginatedAsyncOperation(operationModel.getOperationName()); } + private String getPaginatorLimitKeyName() { + return paginatorDefinition != null ? paginatorDefinition.getLimitKey() : ""; + } + + private CodeBlock noteAboutLimitConfigurationMethod() { + return CodeBlock.builder() + .add("\n

Please notice that the configuration of $L won't limit the number of results " + + "you get with the paginator. It only limits the number of results in each page.

", + getPaginatorLimitKeyName()) + .build(); + } + private CodeBlock noteAboutSyncNonPaginatedMethod() { return CodeBlock.builder() .add("\n

Note: If you prefer to have control on service calls, use the {@link #$L($T)} operation." diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java index a4288ae7fd05..5f92b8757083 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.codegen.model.intermediate.Metadata; import software.amazon.awssdk.codegen.model.intermediate.ShapeMarshaller; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeType; import software.amazon.awssdk.codegen.model.service.Input; import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceMetadata; @@ -289,6 +290,28 @@ public static ShapeModel findShapeModelByC2jNameIfExists(IntermediateModel inter return null; } + /** + * Search for a shape model by its C2J name, excluding request and response shapes, which are not candidates to be members + * of another shape. 
+ * + * @return ShapeModel or null if the shape doesn't exist (if it's primitive or container type for example) + */ + public static ShapeModel findMemberShapeModelByC2jNameIfExists(IntermediateModel intermediateModel, String shapeC2jName) { + ShapeModel candidate = null; + for (ShapeModel shape : intermediateModel.getShapes().values()) { + if (shape.getShapeType() != ShapeType.Request + && shape.getShapeType() != ShapeType.Response + && shape.getC2jName().equals(shapeC2jName)) { + if (candidate != null) { + throw new IllegalStateException("Conflicting candidates for member model with C2J name " + shapeC2jName + ": " + + candidate + " and " + shape); + } + candidate = shape; + } + } + return candidate; + } + public static List findShapesByC2jName(IntermediateModel intermediateModel, String shapeC2jName) { return intermediateModel.getShapes().values().stream().filter(s -> s.getC2jName().equals(shapeC2jName)).collect(toList()); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index a56e74414697..8b2230310c46 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -159,6 +159,12 @@ public class CustomizationConfig { */ private UtilitiesMethod utilitiesMethod; + /** + * Force generation of deprecated client builder method 'enableEndpointDiscovery'. Only services that already had + * this method when it was deprecated require this flag to be set. 
+ */ + private boolean enableEndpointDiscoveryMethodRequired = false; + private CustomizationConfig() { } @@ -406,4 +412,12 @@ public UtilitiesMethod getUtilitiesMethod() { public void setUtilitiesMethod(UtilitiesMethod utilitiesMethod) { this.utilitiesMethod = utilitiesMethod; } + + public boolean isEnableEndpointDiscoveryMethodRequired() { + return enableEndpointDiscoveryMethodRequired; + } + + public void setEnableEndpointDiscoveryMethodRequired(boolean enableEndpointDiscoveryMethodRequired) { + this.enableEndpointDiscoveryMethodRequired = enableEndpointDiscoveryMethodRequired; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java index 1b04bf63fda2..a365465db74f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModel.java @@ -27,7 +27,6 @@ import java.util.stream.Collectors; import software.amazon.awssdk.awscore.AwsResponse; import software.amazon.awssdk.awscore.AwsResponseMetadata; -import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; import software.amazon.awssdk.codegen.model.service.PaginatorDefinition; import software.amazon.awssdk.codegen.naming.NamingStrategy; @@ -104,8 +103,21 @@ public Map getShapes() { return shapes; } - public ShapeModel getShapeByC2jName(String c2jName) { - return Utils.findShapeModelByC2jName(this, c2jName); + /** + * Looks up a shape by name and verifies that the expected C2J name matches + * @param shapeName the name of the shape in the intermediate model + * @param shapeC2jName C2J's name for the shape + * @return the ShapeModel + * @throws IllegalArgumentException if no matching shape is found + */ + public ShapeModel 
getShapeByNameAndC2jName(String shapeName, String shapeC2jName) { + for (ShapeModel sm : getShapes().values()) { + if (shapeName.equals(sm.getShapeName()) && shapeC2jName.equals(sm.getC2jName())) { + return sm; + } + } + throw new IllegalArgumentException("C2J shape " + shapeC2jName + " with shape name " + shapeName + " does not exist in " + + "the intermediate model."); } public CustomizationConfig getCustomizationConfig() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index de862f089813..aa1b55f8a906 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -50,6 +50,8 @@ public class OperationModel extends DocumentationModel { private boolean endpointOperation; + private boolean endpointCacheRequired; + private EndpointDiscovery endpointDiscovery; @JsonIgnore @@ -207,6 +209,14 @@ public void setEndpointOperation(boolean endpointOperation) { this.endpointOperation = endpointOperation; } + public boolean isEndpointCacheRequired() { + return endpointCacheRequired; + } + + public void setEndpointCacheRequired(boolean endpointCacheRequired) { + this.endpointCacheRequired = endpointCacheRequired; + } + public boolean isPaginated() { return isPaginated; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java index 1367f5428f85..02d1e85636ac 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java @@ -50,6 +50,8 @@ public class DefaultNamingStrategy implements NamingStrategy { private static Logger log = 
Logger.loggerFor(DefaultNamingStrategy.class); + private static final String COLLISION_DISAMBIGUATION_PREFIX = "Default"; + private static final Set RESERVED_KEYWORDS; private static final Set RESERVED_EXCEPTION_METHOD_NAMES; @@ -191,23 +193,39 @@ private String getCustomizedPackageName(String serviceName, String defaultPatter @Override public String getExceptionName(String errorShapeName) { + String baseName; if (errorShapeName.endsWith(FAULT_CLASS_SUFFIX)) { - return pascalCase(errorShapeName.substring(0, errorShapeName.length() - FAULT_CLASS_SUFFIX.length())) + + baseName = pascalCase(errorShapeName.substring(0, errorShapeName.length() - FAULT_CLASS_SUFFIX.length())) + EXCEPTION_CLASS_SUFFIX; } else if (errorShapeName.endsWith(EXCEPTION_CLASS_SUFFIX)) { - return pascalCase(errorShapeName); + baseName = pascalCase(errorShapeName); + } else { + baseName = pascalCase(errorShapeName) + EXCEPTION_CLASS_SUFFIX; + } + if (baseName.equals(getServiceName() + EXCEPTION_CLASS_SUFFIX)) { + return COLLISION_DISAMBIGUATION_PREFIX + baseName; } - return pascalCase(errorShapeName) + EXCEPTION_CLASS_SUFFIX; + return baseName; } @Override public String getRequestClassName(String operationName) { - return pascalCase(operationName) + REQUEST_CLASS_SUFFIX; + String baseName = pascalCase(operationName) + REQUEST_CLASS_SUFFIX; + if (!operationName.equals(getServiceName())) { + return baseName; + } + + return COLLISION_DISAMBIGUATION_PREFIX + baseName; } @Override public String getResponseClassName(String operationName) { - return pascalCase(operationName) + RESPONSE_CLASS_SUFFIX; + String baseName = pascalCase(operationName) + RESPONSE_CLASS_SUFFIX; + if (!operationName.equals(getServiceName())) { + return baseName; + } + + return COLLISION_DISAMBIGUATION_PREFIX + baseName; } @Override @@ -299,14 +317,20 @@ public String getExistenceCheckMethodName(String memberName, Shape parentShape) @Override public String getBeanStyleGetterMethodName(String memberName, Shape parentShape, Shape 
c2jShape) { - String fluentGetterMethodName = getFluentGetterMethodName(memberName, parentShape, c2jShape); + String fluentGetterMethodName; + if (Utils.isOrContainsEnumShape(c2jShape, serviceModel.getShapes())) { + // Use the enum (modeled) name for bean-style getters + fluentGetterMethodName = getFluentEnumGetterMethodName(memberName, parentShape, c2jShape); + } else { + fluentGetterMethodName = getFluentGetterMethodName(memberName, parentShape, c2jShape); + } return String.format("get%s", Utils.capitalize(fluentGetterMethodName)); } @Override public String getBeanStyleSetterMethodName(String memberName, Shape parentShape, Shape c2jShape) { - String fluentSetterMethodName = getFluentSetterMethodName(memberName, parentShape, c2jShape); - return String.format("set%s", Utils.capitalize(fluentSetterMethodName)); + String beanStyleGetter = getBeanStyleGetterMethodName(memberName, parentShape, c2jShape); + return String.format("set%s", beanStyleGetter.substring("get".length())); } @Override diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java index 4686ea235649..626e176481b7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java @@ -54,15 +54,32 @@ public TypeSpec poetSpec() { .addJavadoc("Internal implementation of {@link $T}.", builderInterfaceName); if (model.getEndpointOperation().isPresent()) { - builder.addMethod(enableEndpointDiscovery()); + builder.addMethod(endpointDiscoveryEnabled()); + + if (model.getCustomizationConfig().isEnableEndpointDiscoveryMethodRequired()) { + builder.addMethod(enableEndpointDiscovery()); + } } return builder.addMethod(buildClientMethod()).build(); } + private MethodSpec endpointDiscoveryEnabled() { + return 
MethodSpec.methodBuilder("endpointDiscoveryEnabled") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(builderClassName) + .addParameter(boolean.class, "endpointDiscoveryEnabled") + .addStatement("this.endpointDiscoveryEnabled = endpointDiscoveryEnabled") + .addStatement("return this") + .build(); + } + private MethodSpec enableEndpointDiscovery() { return MethodSpec.methodBuilder("enableEndpointDiscovery") .addAnnotation(Override.class) + .addAnnotation(Deprecated.class) + .addJavadoc("@deprecated Use {@link #endpointDiscoveryEnabled($T)} instead.", boolean.class) .addModifiers(Modifier.PUBLIC) .returns(builderClassName) .addStatement("endpointDiscoveryEnabled = true") diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index c94af0736de7..8d7a13e9291e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -35,6 +35,7 @@ import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.poet.ClassSpec; import software.amazon.awssdk.codegen.poet.PoetUtils; @@ -78,10 +79,12 @@ public TypeSpec poetSpec() { ClassName.get(basePackage, model.getMetadata().getSyncBuilder()), ClassName.get(basePackage, model.getMetadata().getAsyncBuilder())); + // Only services that require endpoint discovery for at least one of their operations get a default value of + // 'true' if (model.getEndpointOperation().isPresent()) { 
builder.addField(FieldSpec.builder(boolean.class, "endpointDiscoveryEnabled") .addModifiers(PROTECTED) - .initializer("false") + .initializer(resolveDefaultEndpointDiscovery() ? "true" : "false") .build()); } @@ -102,6 +105,12 @@ public TypeSpec poetSpec() { return builder.build(); } + private boolean resolveDefaultEndpointDiscovery() { + return model.getEndpointOperation() + .map(OperationModel::isEndpointCacheRequired) + .orElse(false); + } + private MethodSpec signingNameMethod() { return MethodSpec.methodBuilder("signingName") .addAnnotation(Override.class) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderInterface.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderInterface.java index eca3edac13b0..f9e59dcfdb11 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderInterface.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderInterface.java @@ -50,7 +50,11 @@ public TypeSpec poetSpec() { .addJavadoc(getJavadoc()); if (model.getEndpointOperation().isPresent()) { - builder.addMethod(enableEndpointDiscovery()); + if (model.getCustomizationConfig().isEnableEndpointDiscoveryMethodRequired()) { + builder.addMethod(enableEndpointDiscovery()); + } + + builder.addMethod(endpointDiscovery()); } if (model.getCustomizationConfig().getServiceSpecificClientConfigClass() != null) { @@ -72,6 +76,16 @@ private MethodSpec enableEndpointDiscovery() { return MethodSpec.methodBuilder("enableEndpointDiscovery") .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) .returns(TypeVariableName.get("B")) + .addAnnotation(Deprecated.class) + .addJavadoc("@deprecated Use {@link #endpointDiscoveryEnabled($T)} instead.", boolean.class) + .build(); + } + + private MethodSpec endpointDiscovery() { + return MethodSpec.methodBuilder("endpointDiscoveryEnabled") + .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT) + 
.returns(TypeVariableName.get("B")) + .addParameter(boolean.class, "endpointDiscovery") .build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java index 323415e20241..aa9b89d450c9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/SyncClientBuilderClass.java @@ -54,15 +54,32 @@ public TypeSpec poetSpec() { .addJavadoc("Internal implementation of {@link $T}.", builderInterfaceName); if (model.getEndpointOperation().isPresent()) { - builder.addMethod(enableEndpointDiscovery()); + builder.addMethod(endpointDiscoveryEnabled()); + + if (model.getCustomizationConfig().isEnableEndpointDiscoveryMethodRequired()) { + builder.addMethod(enableEndpointDiscovery()); + } } return builder.addMethod(buildClientMethod()).build(); } + private MethodSpec endpointDiscoveryEnabled() { + return MethodSpec.methodBuilder("endpointDiscoveryEnabled") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(builderClassName) + .addParameter(boolean.class, "endpointDiscoveryEnabled") + .addStatement("this.endpointDiscoveryEnabled = endpointDiscoveryEnabled") + .addStatement("return this") + .build(); + } + private MethodSpec enableEndpointDiscovery() { return MethodSpec.methodBuilder("enableEndpointDiscovery") .addAnnotation(Override.class) + .addAnnotation(Deprecated.class) + .addJavadoc("@deprecated Use {@link #endpointDiscoveryEnabled($T)} instead.", boolean.class) .addModifiers(Modifier.PUBLIC) .returns(builderClassName) .addStatement("endpointDiscoveryEnabled = true") diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java index b7045285fd95..cdb25d0f3016 
100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java @@ -18,6 +18,7 @@ import static com.squareup.javapoet.TypeSpec.Builder; import static java.util.Collections.singletonList; import static javax.lang.model.element.Modifier.PRIVATE; +import static javax.lang.model.element.Modifier.STATIC; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applyPaginatorUserAgentMethod; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applySignerOverrideMethod; import static software.amazon.awssdk.codegen.poet.client.SyncClientClass.getProtocolSpecs; @@ -31,6 +32,7 @@ import com.squareup.javapoet.TypeSpec; import java.net.URI; import java.nio.ByteBuffer; +import java.util.Collections; import java.util.List; import java.util.concurrent.Executor; import java.util.stream.Collectors; @@ -46,7 +48,6 @@ import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; import software.amazon.awssdk.codegen.model.config.customization.UtilitiesMethod; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; -import software.amazon.awssdk.codegen.model.intermediate.MemberModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; @@ -54,6 +55,7 @@ import software.amazon.awssdk.codegen.poet.StaticImport; import software.amazon.awssdk.codegen.poet.client.specs.ProtocolSpec; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -61,11 +63,16 @@ import 
software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRefreshCache; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.utils.CompletableFutureUtils; import software.amazon.awssdk.utils.FunctionalUtils; public final class AsyncClientClass extends AsyncClientInterface { + private static final String PUBLISHER_NAME = "metricPublishers"; + private static final String METRIC_COLLECTOR_NAME = "apiCallMetricCollector"; private final IntermediateModel model; private final PoetExtensions poetExtensions; private final ClassName className; @@ -101,7 +108,8 @@ public TypeSpec poetSpec() { .addMethods(operations()) .addMethod(closeMethod()) .addMethods(protocolSpec.additionalMethods()) - .addMethod(protocolSpec.initProtocolFactory(model)); + .addMethod(protocolSpec.initProtocolFactory(model)) + .addMethod(resolveMetricPublishersMethod()); // Kinesis doesn't support CBOR for STS yet so need another protocol factory for JSON if (model.getMetadata().isCborProtocol()) { @@ -190,9 +198,18 @@ private MethodSpec closeMethod() { protected MethodSpec.Builder operationBody(MethodSpec.Builder builder, OperationModel opModel) { builder.addModifiers(Modifier.PUBLIC) - .addAnnotation(Override.class) - .beginControlFlow("try") - .addCode(ClientClassUtils.callApplySignerOverrideMethod(opModel)) + .addAnnotation(Override.class); + + builder.addStatement("$1T $2N = $1T.create($3S)", + MetricCollector.class, METRIC_COLLECTOR_NAME, "ApiCall"); + builder.beginControlFlow("try"); + + builder.addStatement("$N.reportMetric($T.$L, $S)", METRIC_COLLECTOR_NAME, CoreMetric.class, "SERVICE_ID", + model.getMetadata().getServiceId()); + 
builder.addStatement("$N.reportMetric($T.$L, $S)", METRIC_COLLECTOR_NAME, CoreMetric.class, "OPERATION_NAME", + opModel.getOperationName()); + + builder.addCode(ClientClassUtils.callApplySignerOverrideMethod(opModel)) .addCode(ClientClassUtils.addEndpointTraitCode(opModel)) .addCode(protocolSpec.responseHandler(model, opModel)); protocolSpec.errorResponseHandler(opModel).ifPresent(builder::addCode); @@ -224,8 +241,16 @@ protected MethodSpec.Builder operationBody(MethodSpec.Builder builder, Operation "() -> $N.exceptionOccurred(t))", paramName); } - return builder.addStatement("return $T.failedFuture(t)", CompletableFutureUtils.class) - .endControlFlow(); + builder.addStatement("$T<$T> $N = resolveMetricPublishers(clientConfiguration, $N.overrideConfiguration().orElse(null))", + List.class, + MetricPublisher.class, + PUBLISHER_NAME, + opModel.getInput().getVariableName()) + .addStatement("$N.forEach(p -> p.publish($N.collect()))", PUBLISHER_NAME, "apiCallMetricCollector") + .addStatement("return $T.failedFuture(t)", CompletableFutureUtils.class) + .endControlFlow(); + + return builder; } @Override @@ -273,7 +298,7 @@ private CodeBlock createEventStreamTaggedUnionJsonMarshaller(ShapeModel eventStr EventStreamTaggedUnionJsonMarshaller.class); List eventNames = EventStreamUtils.getEventMembers(eventStreamShape) - .map(MemberModel::getC2jName) + .map(m -> m.getShape().getShapeName()) .collect(Collectors.toList()); eventNames.forEach(event -> builder.add(".putMarshaller($T.class, new $T(protocolFactory))", @@ -301,4 +326,39 @@ private MethodSpec utilitiesMethod() { String.join(",", config.getCreateMethodParams())) .build(); } + + private MethodSpec resolveMetricPublishersMethod() { + String clientConfigName = "clientConfiguration"; + String requestOverrideConfigName = "requestOverrideConfiguration"; + + MethodSpec.Builder methodBuilder = MethodSpec.methodBuilder("resolveMetricPublishers") + .addModifiers(PRIVATE, STATIC) + .returns(ParameterizedTypeName.get(List.class, 
MetricPublisher.class)) + .addParameter(SdkClientConfiguration.class, clientConfigName) + .addParameter(RequestOverrideConfiguration.class, requestOverrideConfigName); + + String publishersName = "publishers"; + + methodBuilder.addStatement("$T $N = null", ParameterizedTypeName.get(List.class, MetricPublisher.class), publishersName); + + methodBuilder.beginControlFlow("if ($N != null)", requestOverrideConfigName) + .addStatement("$N = $N.metricPublishers()", publishersName, requestOverrideConfigName) + .endControlFlow(); + + methodBuilder.beginControlFlow("if ($1N == null || $1N.isEmpty())", publishersName) + .addStatement("$N = $N.option($T.$N)", + publishersName, + clientConfigName, + SdkClientOption.class, + "METRIC_PUBLISHERS") + .endControlFlow(); + + methodBuilder.beginControlFlow("if ($1N == null)", publishersName) + .addStatement("$N = $T.emptyList()", publishersName, Collections.class) + .endControlFlow(); + + methodBuilder.addStatement("return $N", publishersName); + + return methodBuilder.build(); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java index cd26f0fc0a5b..d330e2959eae 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java @@ -17,16 +17,19 @@ import static javax.lang.model.element.Modifier.FINAL; import static javax.lang.model.element.Modifier.PRIVATE; +import static javax.lang.model.element.Modifier.STATIC; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applyPaginatorUserAgentMethod; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applySignerOverrideMethod; import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.MethodSpec; +import 
com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeSpec; import com.squareup.javapoet.TypeSpec.Builder; import java.net.URI; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.stream.Collectors; import javax.lang.model.element.Modifier; @@ -47,11 +50,15 @@ import software.amazon.awssdk.codegen.poet.client.specs.QueryProtocolSpec; import software.amazon.awssdk.codegen.poet.client.specs.XmlProtocolSpec; import software.amazon.awssdk.codegen.utils.PaginatorUtils; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRefreshCache; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; //TODO Make SyncClientClass extend SyncClientInterface (similar to what we do in AsyncClientClass) public class SyncClientClass implements ClassSpec { @@ -84,7 +91,8 @@ public TypeSpec poetSpec() { .addMethod(constructor()) .addMethod(nameMethod()) .addMethods(protocolSpec.additionalMethods()) - .addMethods(operations()); + .addMethods(operations()) + .addMethod(resolveMetricPublishersMethod()); protocolSpec.createErrorResponseHandler().ifPresent(classBuilder::addMethod); @@ -186,7 +194,28 @@ private List operationMethodSpecs(OperationModel opModel) { method.endControlFlow(); } - method.addCode(protocolSpec.executionHandler(opModel)); + String metricCollectorName = "apiCallMetricCollector"; + + method.addStatement("$1T $2N = $1T.create($3S)", + MetricCollector.class, metricCollectorName, "ApiCall"); + + String publishersName = 
"metricPublishers"; + + method.beginControlFlow("try") + .addStatement("$N.reportMetric($T.$L, $S)", metricCollectorName, CoreMetric.class, "SERVICE_ID", + model.getMetadata().getServiceId()) + .addStatement("$N.reportMetric($T.$L, $S)", metricCollectorName, CoreMetric.class, "OPERATION_NAME", + opModel.getOperationName()) + .addCode(protocolSpec.executionHandler(opModel)) + .endControlFlow() + .beginControlFlow("finally") + .addStatement("$T<$T> $N = resolveMetricPublishers(clientConfiguration, $N.overrideConfiguration().orElse(null))", + List.class, + MetricPublisher.class, + publishersName, + opModel.getInput().getVariableName()) + .addStatement("$N.forEach(p -> p.publish($N.collect()))", publishersName, metricCollectorName) + .endControlFlow(); methods.add(method.build()); @@ -259,4 +288,39 @@ static ProtocolSpec getProtocolSpecs(PoetExtensions poetExtensions, Intermediate throw new RuntimeException("Unknown protocol: " + protocol.name()); } } + + private MethodSpec resolveMetricPublishersMethod() { + String clientConfigName = "clientConfiguration"; + String requestOverrideConfigName = "requestOverrideConfiguration"; + + MethodSpec.Builder methodBuilder = MethodSpec.methodBuilder("resolveMetricPublishers") + .addModifiers(PRIVATE, STATIC) + .returns(ParameterizedTypeName.get(List.class, MetricPublisher.class)) + .addParameter(SdkClientConfiguration.class, clientConfigName) + .addParameter(RequestOverrideConfiguration.class, requestOverrideConfigName); + + String publishersName = "publishers"; + + methodBuilder.addStatement("$T $N = null", ParameterizedTypeName.get(List.class, MetricPublisher.class), publishersName); + + methodBuilder.beginControlFlow("if ($N != null)", requestOverrideConfigName) + .addStatement("$N = $N.metricPublishers()", publishersName, requestOverrideConfigName) + .endControlFlow(); + + methodBuilder.beginControlFlow("if ($1N == null || $1N.isEmpty())", publishersName) + .addStatement("$N = $N.option($T.$N)", + publishersName, + 
clientConfigName, + SdkClientOption.class, + "METRIC_PUBLISHERS") + .endControlFlow(); + + methodBuilder.beginControlFlow("if ($1N == null)", publishersName) + .addStatement("$N = $T.emptyList()", publishersName, Collections.class) + .endControlFlow(); + + methodBuilder.addStatement("return $N", publishersName); + + return methodBuilder.build(); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 83e01de1c01f..30f089297401 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -28,6 +28,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import javax.lang.model.element.Modifier; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.eventstream.EventStreamAsyncResponseTransformer; import software.amazon.awssdk.awscore.eventstream.EventStreamTaggedUnionPojoSupplier; import software.amazon.awssdk.awscore.eventstream.RestEventStreamAsyncResponseTransformer; @@ -39,6 +40,7 @@ import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; +import software.amazon.awssdk.core.SdkPojoBuilder; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; @@ -177,6 +179,8 @@ public CodeBlock executionHandler(OperationModel opModel) { "errorResponseHandler", opModel.getInput().getVariableName()); + codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + if (opModel.hasStreamingInput()) { 
codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L)", syncStreamingMarshaller(model, opModel, marshaller)); @@ -239,6 +243,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper "$L" + ".withResponseHandler($L)\n" + ".withErrorResponseHandler(errorResponseHandler)\n" + + ".withMetricCollector(apiCallMetricCollector)\n" + hostPrefixExpression(opModel) + discoveredEndpoint(opModel) + asyncRequestBody + @@ -263,7 +268,13 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); String whenComplete = whenCompleteBody(opModel, customerResponseHandler); if (!whenComplete.isEmpty()) { - builder.add("executeFuture$L;", whenComplete); + String whenCompletedFutureName = "whenCompleted"; + builder.addStatement("$T requestOverrideConfig = $L.overrideConfiguration().orElse(null)", + AwsRequestOverrideConfiguration.class, opModel.getInput().getVariableName()); + builder.addStatement("$T<$T> $N = $N$L", CompletableFuture.class, executeFutureValueType, + whenCompletedFutureName, "executeFuture", whenComplete); + builder.addStatement("executeFuture = $T.forwardExceptionTo($N, executeFuture)", + CompletableFutureUtils.class, whenCompletedFutureName); } if (opModel.hasEventStreamOutput()) { builder.addStatement("return $T.forwardExceptionTo(future, executeFuture)", CompletableFutureUtils.class); @@ -323,7 +334,7 @@ private String whenCompleteBody(OperationModel operationModel, String responseHa return streamingOutputWhenComplete(responseHandlerName); } else { // Non streaming can just return the future as is - return ""; + return publishMetricsWhenComplete(); } } @@ -333,6 +344,7 @@ private String whenCompleteBody(OperationModel operationModel, String responseHa * {@link EventStreamAsyncResponseTransformer}. Failure is notified via the normal future (the one returned by the client * handler). 
* + * * @param responseHandlerName Variable name of response handler customer passed in. * @return whenComplete to append to future. */ @@ -344,12 +356,12 @@ private String eventStreamOutputWhenComplete(String responseHandlerName) { + " } finally {" + " future.completeExceptionally(e);" + " }" - + " }%n" - + "})", responseHandlerName); + + " }" + + "%s" + + "})", responseHandlerName, publishMetrics()); } - @Override public Optional createErrorResponseHandler() { ClassName httpResponseHandler = ClassName.get(HttpResponseHandler.class); @@ -417,11 +429,11 @@ private void responseHandlersForEventStreaming(OperationModel opModel, TypeName protocolFactory, JsonOperationMetadata.class, ClassName.get(EventStreamTaggedUnionPojoSupplier.class)); - EventStreamUtils.getEvents(eventStream) + EventStreamUtils.getEventMembers(eventStream) .forEach(m -> builder.add(".putSdkPojoSupplier(\"$L\", $T::builder)\n", - m.getC2jName(), poetExtensions.getModelClass(m.getC2jName()))); - builder.add(".defaultSdkPojoSupplier(() -> $T.UNKNOWN)\n" - + ".build());\n", eventStreamBaseClass); + m.getC2jName(), poetExtensions.getModelClass(m.getShape().getC2jName()))); + builder.add(".defaultSdkPojoSupplier(() -> new $T($T.UNKNOWN))\n" + + ".build());\n", SdkPojoBuilder.class, eventStreamBaseClass); } private String protocolFactoryLiteral(IntermediateModel model, OperationModel opModel) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ProtocolSpec.java index 38ba7afc1f8f..f374500e8997 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ProtocolSpec.java @@ -155,7 +155,8 @@ default String streamingOutputWhenComplete(String responseHandlerName) { + " runAndLogError(log, \"Exception thrown in exceptionOccurred callback, ignoring\", () " + "-> 
%s.exceptionOccurred(e));%n" + " }%n" - + "})", responseHandlerName); + + "%s" + + "})", responseHandlerName, publishMetrics()); } @@ -177,4 +178,16 @@ default TypeName executeFutureValueType(OperationModel opModel, PoetExtensions p default TypeName getPojoResponseType(OperationModel opModel, PoetExtensions poetExtensions) { return poetExtensions.getModelClass(opModel.getReturnType().getReturnType()); } + + default String publishMetricsWhenComplete() { + return String.format(".whenComplete((r, e) -> {%n" + + "%s%n" + + "})", publishMetrics()); + } + + default String publishMetrics() { + return "List metricPublishers = resolveMetricPublishers(clientConfiguration, " + + "requestOverrideConfig);\n" + + "metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));"; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index cd6b0b2dc94d..1c6327a0e063 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -24,6 +24,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import javax.lang.model.element.Modifier; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; @@ -31,6 +32,7 @@ import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; +import software.amazon.awssdk.utils.CompletableFutureUtils; public class QueryProtocolSpec implements ProtocolSpec 
{ @@ -113,6 +115,9 @@ public CodeBlock executionHandler(OperationModel opModel) { "responseHandler", "errorResponseHandler", opModel.getInput().getVariableName()); + + codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + if (opModel.hasStreamingInput()) { return codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L));", syncStreamingMarshaller(intermediateModel, opModel, marshaller)) @@ -137,6 +142,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper ".withMarshaller($L)" + ".withResponseHandler(responseHandler)" + ".withErrorResponseHandler($N)\n" + + ".withMetricCollector(apiCallMetricCollector)\n" + hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L) $L);", @@ -151,11 +157,20 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper "errorResponseHandler", opModel.getInput().getVariableName(), opModel.hasStreamingOutput() ? ", asyncResponseTransformer" : ""); + builder.addStatement("$T requestOverrideConfig = $L.overrideConfiguration().orElse(null)", + AwsRequestOverrideConfiguration.class, opModel.getInput().getVariableName()); + String whenCompleteFutureName = "whenCompleteFuture"; + builder.addStatement("$T $N = null", ParameterizedTypeName.get(ClassName.get(CompletableFuture.class), + executeFutureValueType), whenCompleteFutureName); if (opModel.hasStreamingOutput()) { - builder.add("executeFuture$L;", streamingOutputWhenComplete("asyncResponseTransformer")); + builder.addStatement("$N = executeFuture$L", whenCompleteFutureName, + streamingOutputWhenComplete("asyncResponseTransformer")); + } else { + builder.addStatement("$N = executeFuture$L", whenCompleteFutureName, publishMetricsWhenComplete()); } - builder.addStatement("return executeFuture"); + builder.addStatement("return $T.forwardExceptionTo($N, executeFuture)", CompletableFutureUtils.class, + whenCompleteFutureName); return builder.build(); } diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 55beb7078878..782c3a02bb48 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -20,12 +20,15 @@ import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeName; import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.poet.PoetExtensions; import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; +import software.amazon.awssdk.utils.CompletableFutureUtils; public final class XmlProtocolSpec extends QueryProtocolSpec { @@ -111,6 +114,7 @@ public CodeBlock executionHandler(OperationModel opModel) { .add("\n\nreturn clientHandler.execute(new $T<$T, $T>()" + ".withOperationName(\"$N\")\n" + ".withCombinedResponseHandler($N)" + + ".withMetricCollector(apiCallMetricCollector)\n" + hostPrefixExpression(opModel) + discoveredEndpoint(opModel) + ".withInput($L)", @@ -120,6 +124,9 @@ public CodeBlock executionHandler(OperationModel opModel) { opModel.getOperationName(), "responseHandler", opModel.getInput().getVariableName()); + + codeBlock.add(".withMetricCollector($N)", "apiCallMetricCollector"); + if (opModel.hasStreamingInput()) { return codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L));", syncStreamingMarshaller(intermediateModel, opModel, marshaller)) @@ -166,10 +173,20 @@ 
public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper opModel.getInput().getVariableName(), opModel.hasStreamingOutput() ? ", asyncResponseTransformer" : ""); + builder.addStatement("$T requestOverrideConfig = $L.overrideConfiguration().orElse(null)", + AwsRequestOverrideConfiguration.class, opModel.getInput().getVariableName()); + + String whenCompleteFutureName = "whenCompleteFuture"; + builder.addStatement("$T $N = null", ParameterizedTypeName.get(ClassName.get(CompletableFuture.class), + executeFutureValueType), whenCompleteFutureName); if (opModel.hasStreamingOutput()) { - builder.add("executeFuture$L;", streamingOutputWhenComplete("asyncResponseTransformer")); + builder.addStatement("$N = executeFuture$L", whenCompleteFutureName, + streamingOutputWhenComplete("asyncResponseTransformer")); + } else { + builder.addStatement("$N = executeFuture$L", whenCompleteFutureName, publishMetricsWhenComplete()); } - builder.addStatement("return executeFuture"); + builder.addStatement("return $T.forwardExceptionTo($N, executeFuture)", CompletableFutureUtils.class, + whenCompleteFutureName); return builder.build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryAsyncCacheLoaderGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryAsyncCacheLoaderGenerator.java index 2ebf9a991d4d..a0042abb63b3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryAsyncCacheLoaderGenerator.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryAsyncCacheLoaderGenerator.java @@ -27,6 +27,7 @@ import com.squareup.javapoet.TypeSpec; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.List; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import 
software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; @@ -38,6 +39,7 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryCacheLoader; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryEndpoint; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; +import software.amazon.awssdk.utils.Validate; public class EndpointDiscoveryAsyncCacheLoaderGenerator implements ClassSpec { @@ -100,12 +102,15 @@ private MethodSpec discoverEndpoint(OperationModel opModel) { .returns(returnType); if (!opModel.getInputShape().isHasHeaderMember()) { + ClassName endpointClass = poetExtensions.getModelClass("Endpoint"); methodBuilder.addCode("return $L.$L($L.builder().build()).thenApply(r -> {", CLIENT_FIELD, opModel.getMethodName(), poetExtensions.getModelClass(opModel.getInputShape().getC2jName())) - .addStatement("$T endpoint = r.endpoints().get(0)", - poetExtensions.getModelClass("Endpoint")) + .addStatement("$T<$T> endpoints = r.endpoints()", List.class, endpointClass) + .addStatement("$T.notEmpty(endpoints, \"Endpoints returned by service for endpoint discovery must " + + "not be empty.\")", Validate.class) + .addStatement("$T endpoint = endpoints.get(0)", endpointClass) .addStatement("return $T.builder().endpoint(toUri(endpoint.address(), $L.defaultEndpoint()))" + ".expirationTime($T.now().plus(endpoint.cachePeriodInMinutes(), $T.MINUTES)).build()", EndpointDiscoveryEndpoint.class, diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryCacheLoaderGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryCacheLoaderGenerator.java index 5661979673c5..b52f1354b820 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryCacheLoaderGenerator.java +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/endpointdiscovery/EndpointDiscoveryCacheLoaderGenerator.java @@ -27,6 +27,7 @@ import com.squareup.javapoet.TypeSpec; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.List; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; @@ -38,6 +39,7 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryCacheLoader; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryEndpoint; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; +import software.amazon.awssdk.utils.Validate; public class EndpointDiscoveryCacheLoaderGenerator implements ClassSpec { @@ -101,14 +103,18 @@ private MethodSpec discoverEndpoint(OperationModel opModel) { .returns(returnType); if (!opModel.getInputShape().isHasHeaderMember()) { + ClassName endpointClass = poetExtensions.getModelClass("Endpoint"); methodBuilder.addCode("return $T.supplyAsync(() -> {", CompletableFuture.class) .addStatement("$T response = $L.$L($L.builder().build())", poetExtensions.getModelClass(opModel.getOutputShape().getC2jName()), CLIENT_FIELD, opModel.getMethodName(), poetExtensions.getModelClass(opModel.getInputShape().getC2jName())) - .addStatement("$T endpoint = response.endpoints().get(0)", - poetExtensions.getModelClass("Endpoint")) + .addStatement("$T<$T> endpoints = response.endpoints()", List.class, endpointClass) + .addStatement("$T.notEmpty(endpoints, \"Endpoints returned by service for endpoint discovery must " + + "not be empty.\")", Validate.class) + .addStatement("$T endpoint = endpoints.get(0)", + endpointClass) .addStatement("return $T.builder().endpoint(toUri(endpoint.address(), $L.defaultEndpoint()))" + ".expirationTime($T.now().plus(endpoint.cachePeriodInMinutes(), $T.MINUTES)).build()", EndpointDiscoveryEndpoint.class, diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/eventstream/EventStreamUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/eventstream/EventStreamUtils.java index cbd1489bfde4..c238fc6b202d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/eventstream/EventStreamUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/eventstream/EventStreamUtils.java @@ -15,7 +15,10 @@ package software.amazon.awssdk.codegen.poet.eventstream; +import java.util.Collection; import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; @@ -55,10 +58,11 @@ public static ShapeModel getEventStreamInResponse(ShapeModel responseShape) { } /** - * Get event stream shape from a request/response shape model. Otherwise return empty optional. + * Get event stream shape from a request/response shape model. Otherwise, throw * * @param shapeModel request or response shape of an operation - * @return Optional containing the Eventstream shape + * @return the EventStream shape + * @throws IllegalStateException if there is no associated event stream shape */ private static ShapeModel eventStreamFrom(ShapeModel shapeModel) { if (shapeModel == null || shapeModel.getMembers() == null) { @@ -80,17 +84,15 @@ private static ShapeModel eventStreamFrom(ShapeModel shapeModel) { * * @param model Intermediate model * @param eventShape shape with "event: true" trait - * @return the event stream shape (eventstream: true) that contains the given event. 
+ * @return the event stream shape (eventstream: true) that contains the given event, or an empty optional if the C2J shape + * is marked as an event but the intermediate model representation is not used by an event stream */ - public static ShapeModel getBaseEventStreamShape(IntermediateModel model, ShapeModel eventShape) { + public static Optional getBaseEventStreamShape(IntermediateModel model, ShapeModel eventShape) { return model.getShapes().values() .stream() .filter(ShapeModel::isEventStream) .filter(s -> s.getMembers().stream().anyMatch(m -> m.getShape().equals(eventShape))) - .findFirst() - .orElseThrow(() -> new IllegalStateException( - String.format("Event shape %s not referenced in model by any eventstream shape", - eventShape.getC2jName()))); + .findFirst(); } /** @@ -116,16 +118,21 @@ public static Stream getEventMembers(ShapeModel eventStreamShape) { } /** - * Returns the first operation that contains the given event stream shape. The event stream can be in operation + * Returns all operations that contain the given event stream shape. The event stream can be in operation * request or response shape. 
*/ - public static OperationModel findOperationWithEventStream(IntermediateModel model, ShapeModel eventStreamShape) { - return model.getOperations().values() + public static Collection findOperationsWithEventStream(IntermediateModel model, ShapeModel eventStreamShape) { + Collection operations = model.getOperations().values() .stream() .filter(op -> operationContainsEventStream(op, eventStreamShape)) - .findFirst() - .orElseThrow(() -> new IllegalStateException(String.format( - "%s is an event shape but has no corresponding operation in the model", eventStreamShape.getC2jName()))); + .collect(Collectors.toList()); + + if (operations.isEmpty()) { + throw new IllegalStateException(String.format( + "%s is an event shape but has no corresponding operation in the model", eventStreamShape.getC2jName())); + } + + return operations; } /** @@ -145,14 +152,11 @@ public static boolean doesShapeContainsEventStream(ShapeModel parentShape, Shape * Returns true if the given event shape is a sub-member of any operation request. 
*/ public static boolean isRequestEvent(IntermediateModel model, ShapeModel eventShape) { - try { - ShapeModel eventStreamShape = getBaseEventStreamShape(model, eventShape); - return model.getOperations().values() - .stream() - .anyMatch(o -> doesShapeContainsEventStream(o.getInputShape(), eventStreamShape)); - } catch (IllegalStateException e) { - return false; - } + return getBaseEventStreamShape(model, eventShape) + .map(stream -> model.getOperations().values() + .stream() + .anyMatch(o -> doesShapeContainsEventStream(o.getInputShape(), stream))) + .orElse(false); } private static boolean operationContainsEventStream(OperationModel opModel, ShapeModel eventStreamShape) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java index 65689e909b5a..c92fb5020e62 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/AwsServiceModel.java @@ -28,6 +28,7 @@ import com.squareup.javapoet.WildcardTypeName; import java.io.Serializable; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; @@ -84,46 +85,40 @@ public AwsServiceModel(IntermediateModel intermediateModel, ShapeModel shapeMode @Override public TypeSpec poetSpec() { if (shapeModel.isEventStream()) { - OperationModel opModel = EventStreamUtils.findOperationWithEventStream(intermediateModel, - shapeModel); - String apiName = poetExtensions.getApiName(opModel); - ClassName modelClass = poetExtensions.getModelClassFromShape(shapeModel); + Collection opModels = EventStreamUtils.findOperationsWithEventStream(intermediateModel, + shapeModel); - if (EventStreamUtils.doesShapeContainsEventStream(opModel.getOutputShape(), shapeModel)) { - ClassName responseHandlerClass = 
poetExtensions.eventStreamResponseHandlerType(opModel); - return PoetUtils.createInterfaceBuilder(modelClass) - .addAnnotation(SdkPublicApi.class) - .addSuperinterface(ClassName.get(SdkPojo.class)) - .addJavadoc("Base interface for all event types of the $L API.", apiName) - .addField(FieldSpec.builder(modelClass, "UNKNOWN") - .addModifiers(PUBLIC, Modifier.STATIC, Modifier.FINAL) - .initializer(CodeBlock.builder() - .add("new $T() {\n" - + " @Override\n" - + " public $T<$T> sdkFields() {\n" - + " return $T.emptyList();\n" - + " }\n" - + " @Override\n" - + " public void accept($T.Visitor visitor) {" - + " \nvisitor.visitDefault(this);\n" - + " }\n" - + " };\n", - modelClass, List.class, SdkField.class, - Collections.class, responseHandlerClass - ) - .build()) - .addJavadoc("Special type of {@link $T} for unknown types of events that this " - + "version of the SDK does not know about", modelClass) - .build()) - .addMethod(acceptMethodSpec(modelClass, responseHandlerClass) - .addModifiers(Modifier.ABSTRACT) - .build()) - .build(); + Collection outputOperations = findOutputEventStreamOperations(opModels, shapeModel); + + ClassName modelClass = poetExtensions.getModelClassFromShape(shapeModel); - } else if (EventStreamUtils.doesShapeContainsEventStream(opModel.getInputShape(), shapeModel)) { + if (!outputOperations.isEmpty()) { + CodeBlock unknownInitializer = buildUnknownEventStreamInitializer(outputOperations, + modelClass); + + TypeSpec.Builder builder = + PoetUtils.createInterfaceBuilder(modelClass) + .addAnnotation(SdkPublicApi.class) + .addSuperinterface(ClassName.get(SdkPojo.class)) + .addJavadoc("Base interface for all event types in $L.", shapeModel.getShapeName()) + .addField(FieldSpec.builder(modelClass, "UNKNOWN") + .addModifiers(PUBLIC, Modifier.STATIC, Modifier.FINAL) + .initializer(unknownInitializer) + .addJavadoc("Special type of {@link $T} for unknown types of events that this " + + "version of the SDK does not know about", modelClass) + .build()); + 
+ for (OperationModel opModel : outputOperations) { + ClassName responseHandlerClass = poetExtensions.eventStreamResponseHandlerType(opModel); + builder.addMethod(acceptMethodSpec(modelClass, responseHandlerClass) + .addModifiers(Modifier.ABSTRACT) + .build()); + } + return builder.build(); + } else if (hasInputStreamOperations(opModels, shapeModel)) { return PoetUtils.createInterfaceBuilder(modelClass) .addAnnotation(SdkPublicApi.class) - .addJavadoc("Base interface for all event types of the $L API.", apiName) + .addJavadoc("Base interface for all event types in $L.", shapeModel.getShapeName()) .build(); } @@ -158,28 +153,9 @@ public TypeSpec poetSpec() { } if (this.shapeModel.isEvent()) { - ShapeModel eventStream = EventStreamUtils.getBaseEventStreamShape(intermediateModel, shapeModel); - ClassName eventStreamClassName = poetExtensions.getModelClassFromShape(eventStream); - OperationModel opModel = EventStreamUtils.findOperationWithEventStream(intermediateModel, - eventStream); - - if (EventStreamUtils.doesShapeContainsEventStream(opModel.getOutputShape(), eventStream)) { - ClassName modelClass = poetExtensions.getModelClass(shapeModel.getShapeName()); - ClassName responseHandlerClass = poetExtensions.eventStreamResponseHandlerType(opModel); - specBuilder.addSuperinterface(eventStreamClassName); - specBuilder.addMethod(acceptMethodSpec(modelClass, responseHandlerClass) - .addAnnotation(Override.class) - .addCode(CodeBlock.builder() - .addStatement("visitor.visit(this)") - .build()) - .build()); - - } else if (EventStreamUtils.doesShapeContainsEventStream(opModel.getInputShape(), eventStream)) { - specBuilder.addSuperinterface(eventStreamClassName); - } else { - throw new IllegalArgumentException(shapeModel.getC2jName() + " event shape is not a member in any " - + "request or response event shape"); - } + EventStreamUtils.getBaseEventStreamShape(intermediateModel, shapeModel).ifPresent( + eventStream -> addEventSupport(specBuilder, eventStream) + ); } if 
(this.shapeModel.getDocumentation() != null) { @@ -190,6 +166,71 @@ public TypeSpec poetSpec() { } } + private void addEventSupport(TypeSpec.Builder specBuilder, ShapeModel eventStream) { + ClassName eventStreamClassName = poetExtensions.getModelClassFromShape(eventStream); + Collection opModels = EventStreamUtils.findOperationsWithEventStream(intermediateModel, + eventStream); + + Collection outputOperations = findOutputEventStreamOperations(opModels, eventStream); + + if (!outputOperations.isEmpty()) { + ClassName modelClass = poetExtensions.getModelClass(shapeModel.getShapeName()); + specBuilder.addSuperinterface(eventStreamClassName); + for (OperationModel opModel : outputOperations) { + ClassName responseHandlerClass = poetExtensions.eventStreamResponseHandlerType(opModel); + specBuilder.addMethod(acceptMethodSpec(modelClass, responseHandlerClass) + .addAnnotation(Override.class) + .addCode(CodeBlock.builder() + .addStatement("visitor.visit(this)") + .build()) + .build()); + } + } else if (hasInputStreamOperations(opModels, eventStream)) { + specBuilder.addSuperinterface(eventStreamClassName); + } else { + throw new IllegalArgumentException(shapeModel.getC2jName() + " event shape is not a member in any " + + "request or response event shape"); + } + } + + private boolean hasInputStreamOperations(Collection opModels, ShapeModel eventStream) { + return opModels.stream() + .anyMatch(op -> EventStreamUtils.doesShapeContainsEventStream(op.getInputShape(), eventStream)); + } + + private List findOutputEventStreamOperations(Collection opModels, + ShapeModel eventStream) { + return opModels + .stream() + .filter(opModel -> EventStreamUtils.doesShapeContainsEventStream(opModel.getOutputShape(), eventStream)) + .collect(Collectors.toList()); + } + + private CodeBlock buildUnknownEventStreamInitializer(Collection outputOperations, + ClassName eventStreamModelClass) { + CodeBlock.Builder builder = CodeBlock.builder() + .add("new $T() {\n" + + " @Override\n" + + " public 
$T<$T> sdkFields() {\n" + + " return $T.emptyList();\n" + + " }\n", + eventStreamModelClass, List.class, SdkField.class, + Collections.class + ); + + for (OperationModel opModel : outputOperations) { + ClassName responseHandlerClass = poetExtensions.eventStreamResponseHandlerType(opModel); + builder.add(" @Override\n" + + " public void accept($T.Visitor visitor) {" + + " \nvisitor.visitDefault(this);\n" + + " }\n", responseHandlerClass); + } + + builder.add(" }\n"); + + return builder.build(); + } + private MethodSpec sdkFieldsMethod() { ParameterizedTypeName sdkFieldType = ParameterizedTypeName.get(ClassName.get(SdkField.class), WildcardTypeName.subtypeOf(ClassName.get(Object.class))); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/paginators/PaginatorsClassSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/paginators/PaginatorsClassSpec.java index ce001db96df5..0879d4eca6bf 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/paginators/PaginatorsClassSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/paginators/PaginatorsClassSpec.java @@ -63,7 +63,7 @@ public PaginatorsClassSpec(IntermediateModel model, String c2jOperationName, Pag this.poetExtensions = new PoetExtensions(model); this.typeProvider = new TypeProvider(model); this.operationModel = model.getOperation(c2jOperationName); - this.paginationDocs = new PaginationDocs(model, operationModel); + this.paginationDocs = new PaginationDocs(model, operationModel, paginatorDefinition); } /** diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/protocols/EventStreamJsonMarshallerSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/protocols/EventStreamJsonMarshallerSpec.java index a2bc02eb508b..ac04d95a7777 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/protocols/EventStreamJsonMarshallerSpec.java +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/transform/protocols/EventStreamJsonMarshallerSpec.java @@ -84,7 +84,9 @@ protected FieldSpec operationInfoField() { } private String getMemberNameFromEventStream() { - ShapeModel eventStream = EventStreamUtils.getBaseEventStreamShape(intermediateModel, shapeModel); + ShapeModel eventStream = EventStreamUtils.getBaseEventStreamShape(intermediateModel, shapeModel) + .orElseThrow(() -> new IllegalStateException("Could not find associated event stream spec for " + + shapeModel.getC2jName())); return eventStream.getMembers().stream() .filter(memberModel -> memberModel.getShape().equals(shapeModel)) .findAny() diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/IntermediateModelBuilderTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/IntermediateModelBuilderTest.java new file mode 100644 index 000000000000..378f45a6f4b4 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/IntermediateModelBuilderTest.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import org.junit.Test; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; + +public class IntermediateModelBuilderTest { + + @Test + public void testServiceAndShapeNameCollisions() throws Exception { + final File modelFile = new File(IntermediateModelBuilderTest.class + .getResource("poet/client/c2j/collision/service-2.json").getFile()); + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, modelFile)) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + assertThat(testModel.getShapes().values()) + .extracting(ShapeModel::getShapeName) + .containsExactlyInAnyOrder("DefaultCollisionException", "DefaultCollisionRequest", "DefaultCollisionResponse"); + } + + @Test + public void sharedOutputShapesLinkCorrectlyToOperationOutputs() { + final File modelFile = new File(IntermediateModelBuilderTest.class + .getResource("poet/client/c2j/shared-output/service-2.json").getFile()); + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, modelFile)) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + assertEquals("PingResponse", testModel.getOperation("Ping").getOutputShape().getShapeName()); + assertEquals("SecurePingResponse", 
testModel.getOperation("SecurePing").getOutputShape().getShapeName()); + } + + @Test + public void defaultEndpointDiscovery_true() { + final File modelFile = new File(IntermediateModelBuilderTest.class + .getResource("poet/client/c2j/endpointdiscovery/service-2.json").getFile()); + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, modelFile)) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + assertTrue(testModel.getEndpointOperation().get().isEndpointCacheRequired()); + } + + @Test + public void defaultEndpointDiscovery_false() { + final File modelFile = new File(IntermediateModelBuilderTest.class + .getResource("poet/client/c2j/endpointdiscoveryoptional/service-2.json").getFile()); + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, modelFile)) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + assertFalse(testModel.getEndpointOperation().get().isEndpointCacheRequired()); + } + +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModelTest.java new file mode 100644 index 000000000000..418e9b49b40a --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/IntermediateModelTest.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.io.File; +import java.util.Collections; +import org.junit.Test; +import software.amazon.awssdk.codegen.C2jModels; +import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.service.ServiceMetadata; +import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; + +public class IntermediateModelTest { + + @Test + public void cannotFindShapeWhenNoShapesExist() { + + ServiceMetadata metadata = new ServiceMetadata(); + metadata.setProtocol(Protocol.REST_JSON.getValue()); + metadata.setServiceId("empty-service"); + metadata.setSignatureVersion("V4"); + + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(new ServiceModel(metadata, + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap())) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + assertThatThrownBy(() -> testModel.getShapeByNameAndC2jName("AnyShape", "AnyShape")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("C2J shape AnyShape with shape name AnyShape does not exist in the intermediate model."); + } + + @Test + public void getShapeByNameAndC2jNameVerifiesC2JName() { + final File modelFile = new File(IntermediateModelTest.class + .getResource("../../poet/client/c2j/shared-output/service-2.json").getFile()); + IntermediateModel testModel = new IntermediateModelBuilder( + C2jModels.builder() + 
.serviceModel(ModelLoaderUtils.loadModel(ServiceModel.class, modelFile)) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + + + + assertThatThrownBy(() -> testModel.getShapeByNameAndC2jName("PingResponse", "AnyShape")) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage("C2J shape AnyShape with shape name PingResponse does not exist in the intermediate model."); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java index 88f1298f12fb..f30c642cbeb6 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java @@ -250,6 +250,8 @@ public void sharedModel_providingPackageName_shouldUseProvidedPacakgeName() { @Test public void modelNameShouldHavePascalCase() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getServiceId()).thenReturn("UnitTestService"); assertThat(strat.getRequestClassName("CAPSTest")).isEqualTo("CapsTestRequest"); assertThat(strat.getExceptionName("CAPSTest")).isEqualTo("CapsTestException"); assertThat(strat.getResponseClassName("CAPSTest")).isEqualTo("CapsTestResponse"); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/SharedStreamAwsModelSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/SharedStreamAwsModelSpecTest.java new file mode 100644 index 000000000000..dffbe834b6fd --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/model/SharedStreamAwsModelSpecTest.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.model; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.MatcherAssert.assertThat; +import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.File; +import java.io.IOException; +import java.util.Collection; +import java.util.Locale; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.codegen.C2jModels; +import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; + +@RunWith(Parameterized.class) +public class SharedStreamAwsModelSpecTest { + private static IntermediateModel intermediateModel; + + private final ShapeModel shapeModel; + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + invokeSafely(SharedStreamAwsModelSpecTest::setUp); + return intermediateModel.getShapes().values().stream().map(shape -> new Object[] { shape }).collect(toList()); + } + + public SharedStreamAwsModelSpecTest(ShapeModel shapeModel) { + this.shapeModel = shapeModel; + } + + @Test + public void basicGeneration() throws Exception { + assertThat(new 
AwsServiceModel(intermediateModel, shapeModel), generatesTo(referenceFileForShape())); + } + + private String referenceFileForShape() { + return "sharedstream/" + shapeModel.getShapeName().toLowerCase(Locale.ENGLISH) + ".java"; + } + + private static void setUp() throws IOException { + File serviceModelFile = new File(SharedStreamAwsModelSpecTest.class.getResource("sharedstream/service-2.json").getFile()); + ServiceModel serviceModel = ModelLoaderUtils.loadModel(ServiceModel.class, serviceModelFile); + + intermediateModel = new IntermediateModelBuilder( + C2jModels.builder() + .serviceModel(serviceModel) + .customizationConfig(CustomizationConfig.create()) + .build()) + .build(); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/collision/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/collision/service-2.json new file mode 100644 index 000000000000..431d52020f2b --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/collision/service-2.json @@ -0,0 +1,52 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "collision-service", + "globalEndpoint": "collision-service.amazonaws.com", + "protocol": "rest-json", + "serviceAbbreviation": "Collision Service", + "serviceFullName": "A really bizarrely modelled service", + "serviceId":"CollisionService", + "signatureVersion": "v4", + "uid": "collision-service-2010-05-08", + "xmlNamespace": "https://collision-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "Collision": { + "name": "Collision", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "Collision" + }, + "errors": [ + { + "shape": "CollisionFault" + } + ], + "documentation": "

A very strange operation>

" + } + }, + "shapes": { + "Collision": { + "type": "structure", + "required": [], + "members": {} + }, + "CollisionFault" : { + "type": "structure", + "members": {}, + "documentation": "

A fault

", + "error": { + "code": "Collision", + "httpStatusCode": 400, + "senderFault": true + }, + "exception": true + } + }, + "documentation": "Why would anyone do this?" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/customization.config new file mode 100644 index 000000000000..2c63c0851048 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/customization.config @@ -0,0 +1,2 @@ +{ +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/service-2.json new file mode 100644 index 000000000000..4e51209e6933 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/endpointdiscoveryoptional/service-2.json @@ -0,0 +1,134 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-31", + "endpointPrefix":"awsendpointdiscoverytestservice", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AwsEndpointDiscoveryTest", + "serviceFullName":"AwsEndpointDiscoveryTest", + "serviceId":"AwsEndpointDiscoveryTest", + "signatureVersion":"v4", + "signingName":"awsendpointdiscoverytestservice", + "targetPrefix":"AwsEndpointDiscoveryTestService" + }, + "operations":{ + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true + }, + "TestDiscoveryOptional":{ + "name":"TestDiscoveryOptional", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestDiscoveryOptionalRequest"}, + 
"output":{"shape":"TestDiscoveryOptionalResponse"}, + "endpointdiscovery":{ + } + } + }, + "shapes": { + "Boolean": { + "type": "boolean" + }, + "DescribeEndpointsRequest": { + "type": "structure", + "members": { + "Operation": { + "shape": "String" + }, + "Identifiers": { + "shape": "Identifiers" + } + } + }, + "DescribeEndpointsResponse": { + "type": "structure", + "required": [ + "Endpoints" + ], + "members": { + "Endpoints": { + "shape": "Endpoints" + } + } + }, + "Endpoint": { + "type": "structure", + "required": [ + "Address", + "CachePeriodInMinutes" + ], + "members": { + "Address": { + "shape": "String" + }, + "CachePeriodInMinutes": { + "shape": "Long" + } + } + }, + "Endpoints": { + "type": "list", + "member": { + "shape": "Endpoint" + } + }, + "Identifiers": { + "type": "map", + "key": { + "shape": "String" + }, + "value": { + "shape": "String" + } + }, + "Long": { + "type": "long" + }, + "String": { + "type": "string" + }, + "TestDiscoveryIdentifiersRequiredRequest": { + "type": "structure", + "required": [ + "Sdk" + ], + "members": { + "Sdk": { + "shape": "String", + "endpointdiscoveryid": true + } + } + }, + "TestDiscoveryIdentifiersRequiredResponse": { + "type": "structure", + "members": { + "DiscoveredEndpoint": { + "shape": "Boolean" + } + } + }, + "TestDiscoveryOptionalRequest": { + "type": "structure", + "members": { + } + }, + "TestDiscoveryOptionalResponse": { + "type": "structure", + "members": { + "DiscoveredEndpoint": { + "shape": "Boolean" + } + } + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json index 198ae99ea74d..2cca71c81ecc 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json @@ -404,7 +404,7 @@ "EventOne": { "shape": 
"EventOne" }, - "EventTwo": { + "event-two": { "shape": "EventTwo" } }, diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/shared-output/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/shared-output/service-2.json new file mode 100644 index 000000000000..96db66f7b864 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/shared-output/service-2.json @@ -0,0 +1,49 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "shared-output-service", + "globalEndpoint": "shared-output-service.amazonaws.com", + "protocol": "rest-json", + "serviceAbbreviation": "Shared Output Service", + "serviceFullName": "Shared output service", + "serviceId":"SharedOutputService", + "signatureVersion": "v4", + "uid": "shared-output-service-2010-05-08", + "xmlNamespace": "https://shared-output-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "Ping": { + "name": "ping", + "http": { + "method": "POST", + "requestUri": "/ping" + }, + "errors": [], + "output": { + "shape" : "PingOutput" + }, + "documentation": "

ping

" + }, + "SecurePing": { + "name": "sping", + "http": { + "method": "POST", + "requestUri": "/sping" + }, + "errors": [], + "output": { + "shape" : "PingOutput" + }, + "documentation": "

secure ping

" + } + }, + "shapes": { + "PingOutput": { + "type": "structure", + "required": [], + "members": {} + } + }, + "documentation": "A ping service that shared outputs" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java index 47cb182806f0..b693b2aea0f8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-async-client-class.java @@ -3,6 +3,8 @@ import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executor; import java.util.function.Consumer; @@ -22,20 +24,26 @@ import software.amazon.awssdk.awscore.eventstream.RestEventStreamAsyncResponseTransformer; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.RequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkPojoBuilder; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import 
software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.core.util.VersionInfo; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; @@ -59,7 +67,6 @@ import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersRequest; import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersResponse; import software.amazon.awssdk.services.json.model.InputEvent; -import software.amazon.awssdk.services.json.model.InputEventOne; import software.amazon.awssdk.services.json.model.InputEventStream; import software.amazon.awssdk.services.json.model.InputEventStreamTwo; import software.amazon.awssdk.services.json.model.InputEventTwo; @@ -85,7 +92,6 @@ import software.amazon.awssdk.services.json.transform.EventStreamOperationWithOnlyOutputRequestMarshaller; import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; import software.amazon.awssdk.services.json.transform.InputEventMarshaller; -import software.amazon.awssdk.services.json.transform.InputEventOneMarshaller; import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; @@ -150,7 +156,10 @@ public final String serviceName() { */ @Override public CompletableFuture aPostOperation(APostOperationRequest 
aPostOperationRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; Validate.paramNotBlank(aPostOperationRequest.stringMember(), "StringMember"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); @@ -168,9 +177,19 @@ public CompletableFuture aPostOperation(APostOperationRe .withOperationName("APostOperation") .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .withMetricCollector(apiCallMetricCollector).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationRequest.overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -201,7 +220,10 @@ public CompletableFuture aPostOperation(APostOperationRe @Override public CompletableFuture aPostOperationWithOutput( APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + MetricCollector apiCallMetricCollector = 
MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -216,9 +238,19 @@ public CompletableFuture aPostOperationWithOut .withOperationName("APostOperationWithOutput") .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(aPostOperationWithOutputRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationWithOutputRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationWithOutputRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -245,7 +277,10 @@ public CompletableFuture aPostOperationWithOut @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + 
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); eventStreamOperationRequest = applySignerOverride(eventStreamOperationRequest, EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -259,8 +294,8 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventOne::builder) - .putSdkPojoSupplier("EventTwo", EventTwo::builder).defaultSdkPojoSupplier(() -> EventStream.UNKNOWN) - .build()); + .putSdkPojoSupplier("event-two", EventTwo::builder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); @@ -286,9 +321,11 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) .withAsyncRequestBody(software.amazon.awssdk.core.async.AsyncRequestBody.fromPublisher(adapted)) .withFullDuplex(true).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(eventStreamOperationRequest), - restAsyncResponseTransformer); - executeFuture.whenComplete((r, e) -> { + .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationRequest), restAsyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = eventStreamOperationRequest.overrideConfiguration().orElse( + null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { asyncResponseHandler.exceptionOccurred(e); @@ 
-296,11 +333,17 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest future.completeExceptionally(e); } } + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", () -> asyncResponseHandler.exceptionOccurred(t)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -329,7 +372,10 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest public CompletableFuture eventStreamOperationWithOnlyInput( EventStreamOperationWithOnlyInputRequest eventStreamOperationWithOnlyInputRequest, Publisher requestStream) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyInput"); eventStreamOperationWithOnlyInputRequest = applySignerOverride(eventStreamOperationWithOnlyInputRequest, EventStreamAws4Signer.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) @@ -341,7 +387,7 @@ public CompletableFuture eventStreamO HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() - .putMarshaller(InputEventOne.class, new 
InputEventOneMarshaller(protocolFactory)) + .putMarshaller(InputEvent.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(InputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); Publisher adapted = eventPublisher.map(event -> eventMarshaller.marshall(event)).map( @@ -353,9 +399,19 @@ public CompletableFuture eventStreamO .withMarshaller(new EventStreamOperationWithOnlyInputRequestMarshaller(protocolFactory)) .withAsyncRequestBody(software.amazon.awssdk.core.async.AsyncRequestBody.fromPublisher(adapted)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(eventStreamOperationWithOnlyInputRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationWithOnlyInputRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = eventStreamOperationWithOnlyInputRequest + .overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + eventStreamOperationWithOnlyInputRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -382,45 +438,51 @@ public CompletableFuture eventStreamO */ @Override public CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + 
EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperationWithOnlyOutput"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = new AttachHttpMetadataResponseHandler( - protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); + protocolFactory.createResponseHandler(operationMetadata, EventStreamOperationWithOnlyOutputResponse::builder)); HttpResponseHandler voidResponseHandler = protocolFactory.createResponseHandler(JsonOperationMetadata - .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); + .builder().isPayloadJson(false).hasStreamingSuccessResponse(true).build(), VoidSdkResponse::builder); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), - EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventOne::builder) - .putSdkPojoSupplier("EventTwo", EventTwo::builder).defaultSdkPojoSupplier(() -> EventStream.UNKNOWN) - .build()); + JsonOperationMetadata.builder().isPayloadJson(true).hasStreamingSuccessResponse(false).build(), + EventStreamTaggedUnionPojoSupplier.builder().putSdkPojoSupplier("EventOne", EventOne::builder) + .putSdkPojoSupplier("event-two", EventTwo::builder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build()); HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . builder() - .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) - .initialResponseHandler(responseHandler).exceptionResponseHandler(errorResponseHandler).future(future) - .executor(executor).serviceName(serviceName()).build(); + . builder() + .eventStreamResponseHandler(asyncResponseHandler).eventResponseHandler(eventResponseHandler) + .initialResponseHandler(responseHandler).exceptionResponseHandler(errorResponseHandler).future(future) + .executor(executor).serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . 
builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler - .execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperationWithOnlyOutput") - .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer); - executeFuture.whenComplete((r, e) -> { + .execute( + new ClientExecutionParams() + .withOperationName("EventStreamOperationWithOnlyOutput") + .withMarshaller(new EventStreamOperationWithOnlyOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(eventStreamOperationWithOnlyOutputRequest), restAsyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = eventStreamOperationWithOnlyOutputRequest + .overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { try { asyncResponseHandler.exceptionOccurred(e); @@ -428,11 +490,17 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( future.completeExceptionally(e); } } + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); + List metricPublishers = 
resolveMetricPublishers(clientConfiguration, + eventStreamOperationWithOnlyOutputRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -463,7 +531,10 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( @Override public CompletableFuture getWithoutRequiredMembers( GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -478,9 +549,19 @@ public CompletableFuture getWithoutRequiredMe .withOperationName("GetWithoutRequiredMembers") .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(getWithoutRequiredMembersRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(getWithoutRequiredMembersRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = getWithoutRequiredMembersRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + getWithoutRequiredMembersRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -508,7 +589,10 @@ public CompletableFuture getWithoutRequiredMe @Override public CompletableFuture paginatedOperationWithResultKey( PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -523,9 +607,19 @@ public CompletableFuture paginatedOpera .withOperationName("PaginatedOperationWithResultKey") .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(paginatedOperationWithResultKeyRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(paginatedOperationWithResultKeyRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = paginatedOperationWithResultKeyRequest + .overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -552,7 +646,7 @@ public 
CompletableFuture paginatedOpera * The following are few ways to use the response class: *

* 1) Using the subscribe helper method - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyPublisher publisher = client.paginatedOperationWithResultKeyPaginator(request);
@@ -562,21 +656,25 @@ public CompletableFuture paginatedOpera
      * 
* * 2) Using a custom subscriber - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyPublisher publisher = client.paginatedOperationWithResultKeyPaginator(request);
      * publisher.subscribe(new Subscriber() {
-     *
+     * 
      * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
-     *
-     *
+     * 
+     * 
      * public void onNext(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response) { //... };
      * });}
      * 
- * + * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -626,7 +724,10 @@ public PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKeyP @Override public CompletableFuture paginatedOperationWithoutResultKey( PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -641,9 +742,19 @@ public CompletableFuture paginatedOp .withOperationName("PaginatedOperationWithoutResultKey") .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(paginatedOperationWithoutResultKeyRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(paginatedOperationWithoutResultKeyRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = paginatedOperationWithoutResultKeyRequest + .overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + 
paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -670,7 +781,7 @@ public CompletableFuture paginatedOp * The following are few ways to use the response class: *

* 1) Using the subscribe helper method - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyPublisher publisher = client.paginatedOperationWithoutResultKeyPaginator(request);
@@ -680,21 +791,25 @@ public CompletableFuture paginatedOp
      * 
* * 2) Using a custom subscriber - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyPublisher publisher = client.paginatedOperationWithoutResultKeyPaginator(request);
      * publisher.subscribe(new Subscriber() {
-     *
+     * 
      * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
-     *
-     *
+     * 
+     * 
      * public void onNext(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyResponse response) { //... };
      * });}
      * 
- * + * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -749,7 +864,10 @@ public PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutResu @Override public CompletableFuture streamingInputOperation( StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -763,13 +881,23 @@ public CompletableFuture streamingInputOperatio .execute(new ClientExecutionParams() .withOperationName("StreamingInputOperation") .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withInput(streamingInputOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingInputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = 
resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -807,7 +935,10 @@ public CompletableFuture streamingInputOperatio public CompletableFuture streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, AsyncResponseTransformer asyncResponseTransformer) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) @@ -823,24 +954,32 @@ public CompletableFuture streamingInputOutputOperation( new ClientExecutionParams() .withOperationName("StreamingInputOutputOperation") .withMarshaller( - AsyncStreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()) + AsyncStreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).transferEncoding(true).build()) 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); - executeFuture.whenComplete((r, e) -> { + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingInputOutputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseTransformer.exceptionOccurred(e)); + () -> asyncResponseTransformer.exceptionOccurred(e)); } + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", () -> asyncResponseTransformer.exceptionOccurred(t)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -873,7 +1012,10 @@ public CompletableFuture streamingInputOutputOperation( public CompletableFuture streamingOutputOperation( StreamingOutputOperationRequest streamingOutputOperationRequest, AsyncResponseTransformer asyncResponseTransformer) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + 
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) .isPayloadJson(false).build(); @@ -888,17 +1030,26 @@ public CompletableFuture streamingOutputOperation( .withOperationName("StreamingOutputOperation") .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); - executeFuture.whenComplete((r, e) -> { + .withMetricCollector(apiCallMetricCollector).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingOutputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseTransformer.exceptionOccurred(e)); + () -> asyncResponseTransformer.exceptionOccurred(e)); } + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", () -> asyncResponseTransformer.exceptionOccurred(t)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -919,6 +1070,21 @@ private > T init(T builder) { 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); } + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; + } + private T applyPaginatorUserAgent(T request) { Consumer userAgentApplier = b -> b.addApiName(ApiName.builder() .version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); @@ -949,3 +1115,4 @@ private HttpResponseHandler createErrorResponseHandler(Base return protocolFactory.createErrorResponseHandler(operationMetadata); } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java index c9cba7797d3d..096effd562a6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java @@ -3,14 +3,18 @@ import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; import java.net.URI; +import java.util.Collections; +import java.util.List; import java.util.concurrent.CompletableFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import 
software.amazon.awssdk.awscore.client.handler.AwsAsyncClientHandler; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.AsyncClientHandler; @@ -18,6 +22,9 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRefreshCache; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; @@ -89,7 +96,10 @@ public final String serviceName() { */ @Override public CompletableFuture describeEndpoints(DescribeEndpointsRequest describeEndpointsRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeEndpoints"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -104,9 +114,18 @@ public CompletableFuture describeEndpoints(DescribeEn .withOperationName("DescribeEndpoints") .withMarshaller(new DescribeEndpointsRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(describeEndpointsRequest)); + .withMetricCollector(apiCallMetricCollector).withInput(describeEndpointsRequest)); + 
AwsRequestOverrideConfiguration requestOverrideConfig = describeEndpointsRequest.overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, describeEndpointsRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -132,7 +151,10 @@ public CompletableFuture describeEndpoints(DescribeEn @Override public CompletableFuture testDiscoveryIdentifiersRequired( TestDiscoveryIdentifiersRequiredRequest testDiscoveryIdentifiersRequiredRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryIdentifiersRequired"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -155,9 +177,20 @@ public CompletableFuture testDiscovery .withOperationName("TestDiscoveryIdentifiersRequired") .withMarshaller(new TestDiscoveryIdentifiersRequiredRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .discoveredEndpoint(cachedEndpoint).withInput(testDiscoveryIdentifiersRequiredRequest)); + .withMetricCollector(apiCallMetricCollector).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryIdentifiersRequiredRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = 
testDiscoveryIdentifiersRequiredRequest + .overrideConfiguration().orElse(null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + testDiscoveryIdentifiersRequiredRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -182,7 +215,10 @@ public CompletableFuture testDiscovery @Override public CompletableFuture testDiscoveryOptional( TestDiscoveryOptionalRequest testDiscoveryOptionalRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryOptional"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -205,9 +241,20 @@ public CompletableFuture testDiscoveryOptional( .withOperationName("TestDiscoveryOptional") .withMarshaller(new TestDiscoveryOptionalRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .discoveredEndpoint(cachedEndpoint).withInput(testDiscoveryOptionalRequest)); + .withMetricCollector(apiCallMetricCollector).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryOptionalRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = testDiscoveryOptionalRequest.overrideConfiguration().orElse( + null); + CompletableFuture whenCompleted = 
executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, testDiscoveryOptionalRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -232,7 +279,10 @@ public CompletableFuture testDiscoveryOptional( @Override public CompletableFuture testDiscoveryRequired( TestDiscoveryRequiredRequest testDiscoveryRequiredRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryRequired"); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); @@ -255,9 +305,20 @@ public CompletableFuture testDiscoveryRequired( .withOperationName("TestDiscoveryRequired") .withMarshaller(new TestDiscoveryRequiredRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .discoveredEndpoint(cachedEndpoint).withInput(testDiscoveryRequiredRequest)); + .withMetricCollector(apiCallMetricCollector).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryRequiredRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = testDiscoveryRequiredRequest.overrideConfiguration().orElse( + null); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); return executeFuture; } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, testDiscoveryRequiredRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -273,8 +334,24 @@ private > T init(T builder) { .protocolVersion("1.1"); } + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; + } + private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata) { return protocolFactory.createErrorResponseHandler(operationMetadata); } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java index 75d9d095cc41..1f3fe65691ca 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java @@ -1,11 +1,14 @@ package software.amazon.awssdk.services.endpointdiscoverytest; import java.net.URI; +import java.util.Collections; +import java.util.List; import 
software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; @@ -14,6 +17,9 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; @@ -88,11 +94,21 @@ public DescribeEndpointsResponse describeEndpoints(DescribeEndpointsRequest desc HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeEndpoints"); - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("DescribeEndpoints").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(describeEndpointsRequest) - .withMarshaller(new DescribeEndpointsRequestMarshaller(protocolFactory))); + return clientHandler.execute(new 
ClientExecutionParams() + .withOperationName("DescribeEndpoints").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(describeEndpointsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new DescribeEndpointsRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, describeEndpointsRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -129,13 +145,22 @@ public TestDiscoveryIdentifiersRequiredResponse testDiscoveryIdentifiersRequired .defaultEndpoint(clientConfiguration.option(SdkClientOption.ENDPOINT)).build(); cachedEndpoint = endpointDiscoveryCache.get(key, endpointDiscoveryRequest); } + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryIdentifiersRequired"); - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("TestDiscoveryIdentifiersRequired").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) - .withInput(testDiscoveryIdentifiersRequiredRequest) - .withMarshaller(new TestDiscoveryIdentifiersRequiredRequestMarshaller(protocolFactory))); + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("TestDiscoveryIdentifiersRequired").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryIdentifiersRequiredRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new TestDiscoveryIdentifiersRequiredRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + 
testDiscoveryIdentifiersRequiredRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -171,12 +196,21 @@ public TestDiscoveryOptionalResponse testDiscoveryOptional(TestDiscoveryOptional .defaultEndpoint(clientConfiguration.option(SdkClientOption.ENDPOINT)).build(); cachedEndpoint = endpointDiscoveryCache.get(key, endpointDiscoveryRequest); } + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryOptional"); - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("TestDiscoveryOptional").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) - .withInput(testDiscoveryOptionalRequest) - .withMarshaller(new TestDiscoveryOptionalRequestMarshaller(protocolFactory))); + return clientHandler.execute(new ClientExecutionParams() + .withOperationName("TestDiscoveryOptional").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryOptionalRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new TestDiscoveryOptionalRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, testDiscoveryOptionalRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -212,12 +246,36 @@ public TestDiscoveryRequiredResponse testDiscoveryRequired(TestDiscoveryRequired .defaultEndpoint(clientConfiguration.option(SdkClientOption.ENDPOINT)).build(); cachedEndpoint = endpointDiscoveryCache.get(key, endpointDiscoveryRequest); } + MetricCollector apiCallMetricCollector = 
MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AwsEndpointDiscoveryTest"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "TestDiscoveryRequired"); + + return clientHandler.execute(new ClientExecutionParams() + .withOperationName("TestDiscoveryRequired").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) + .withInput(testDiscoveryRequiredRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new TestDiscoveryRequiredRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, testDiscoveryRequiredRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("TestDiscoveryRequired").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).discoveredEndpoint(cachedEndpoint) - .withInput(testDiscoveryRequiredRequest) - .withMarshaller(new TestDiscoveryRequiredRequestMarshaller(protocolFactory))); + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, @@ -236,3 +294,4 @@ public void close() { clientHandler.close(); } } + diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index a6da11ecb6b2..e97f69cd5896 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -182,7 +182,7 @@ default CompletableFuture aPostOperationWithOu default CompletableFuture aPostOperationWithOutput( Consumer aPostOperationWithOutputRequest) { return aPostOperationWithOutput(APostOperationWithOutputRequest.builder().applyMutation(aPostOperationWithOutputRequest) - .build()); + .build()); } /** @@ -205,7 +205,7 @@ default CompletableFuture aPostOperationWithOu * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { + Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { throw new UnsupportedOperationException(); } @@ -238,7 +238,7 @@ default CompletableFuture eventStreamOperation( Consumer eventStreamOperationRequest, Publisher requestStream, EventStreamOperationResponseHandler asyncResponseHandler) { return eventStreamOperation(EventStreamOperationRequest.builder().applyMutation(eventStreamOperationRequest).build(), - requestStream, asyncResponseHandler); + requestStream, asyncResponseHandler); } /** @@ -298,7 +298,7 @@ default CompletableFuture eventStream Publisher requestStream) { return eventStreamOperationWithOnlyInput( EventStreamOperationWithOnlyInputRequest.builder().applyMutation(eventStreamOperationWithOnlyInputRequest) - .build(), requestStream); + .build(), requestStream); } /** @@ -322,8 +322,8 @@ default CompletableFuture eventStream * 
target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + EventStreamOperationWithOnlyOutputRequest eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { throw new UnsupportedOperationException(); } @@ -354,11 +354,11 @@ default CompletableFuture eventStreamOperationWithOnlyOutput( * target="_top">AWS API Documentation */ default CompletableFuture eventStreamOperationWithOnlyOutput( - Consumer eventStreamOperationWithOnlyOutputRequest, - EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { + Consumer eventStreamOperationWithOnlyOutputRequest, + EventStreamOperationWithOnlyOutputResponseHandler asyncResponseHandler) { return eventStreamOperationWithOnlyOutput( - EventStreamOperationWithOnlyOutputRequest.builder().applyMutation(eventStreamOperationWithOnlyOutputRequest) - .build(), asyncResponseHandler); + EventStreamOperationWithOnlyOutputRequest.builder().applyMutation(eventStreamOperationWithOnlyOutputRequest) + .build(), asyncResponseHandler); } /** @@ -422,7 +422,7 @@ default CompletableFuture getWithoutRequiredM default CompletableFuture getWithoutRequiredMembers( Consumer getWithoutRequiredMembersRequest) { return getWithoutRequiredMembers(GetWithoutRequiredMembersRequest.builder() - .applyMutation(getWithoutRequiredMembersRequest).build()); + .applyMutation(getWithoutRequiredMembersRequest).build()); } /** @@ -479,7 +479,7 @@ default CompletableFuture paginatedOper default CompletableFuture paginatedOperationWithResultKey( Consumer paginatedOperationWithResultKeyRequest) { return paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest.builder() - .applyMutation(paginatedOperationWithResultKeyRequest).build()); + 
.applyMutation(paginatedOperationWithResultKeyRequest).build()); } /** @@ -552,6 +552,10 @@ default CompletableFuture paginatedOper * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -623,6 +627,10 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -696,6 +704,10 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -726,7 +738,7 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKeyPaginator( Consumer paginatedOperationWithResultKeyRequest) { return paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest.builder() - .applyMutation(paginatedOperationWithResultKeyRequest).build()); + .applyMutation(paginatedOperationWithResultKeyRequest).build()); } /** @@ -783,7 +795,7 @@ default CompletableFuture paginatedO default CompletableFuture paginatedOperationWithoutResultKey( Consumer paginatedOperationWithoutResultKeyRequest) { return paginatedOperationWithoutResultKey(PaginatedOperationWithoutResultKeyRequest.builder() - .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); + .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } /** @@ -833,6 +845,10 @@ default CompletableFuture paginatedO * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -906,6 +922,10 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -936,7 +956,7 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutResultKeyPaginator( Consumer paginatedOperationWithoutResultKeyRequest) { return paginatedOperationWithoutResultKeyPaginator(PaginatedOperationWithoutResultKeyRequest.builder() - .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); + .applyMutation(paginatedOperationWithoutResultKeyRequest).build()); } /** @@ -1001,7 +1021,7 @@ default CompletableFuture streamingInputOperati default CompletableFuture streamingInputOperation( Consumer streamingInputOperationRequest, AsyncRequestBody requestBody) { return streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) - .build(), requestBody); + .build(), requestBody); } /** @@ -1066,7 +1086,7 @@ default CompletableFuture streamingInputOperati default CompletableFuture streamingInputOperation( Consumer streamingInputOperationRequest, Path sourcePath) { return streamingInputOperation(StreamingInputOperationRequest.builder().applyMutation(streamingInputOperationRequest) - .build(), sourcePath); + .build(), sourcePath); } /** @@ -1179,7 +1199,7 @@ default CompletableFuture streamingInputOutputOperation( default CompletableFuture streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) { return streamingInputOutputOperation(streamingInputOutputOperationRequest, AsyncRequestBody.fromFile(sourcePath), - AsyncResponseTransformer.toFile(destinationPath)); + AsyncResponseTransformer.toFile(destinationPath)); } /** @@ -1288,7 +1308,7 @@ default CompletableFuture streamingOutputOperation( Consumer 
streamingOutputOperationRequest, AsyncResponseTransformer asyncResponseTransformer) { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) - .build(), asyncResponseTransformer); + .build(), asyncResponseTransformer); } /** @@ -1351,7 +1371,7 @@ default CompletableFuture streamingOutputOpera default CompletableFuture streamingOutputOperation( Consumer streamingOutputOperationRequest, Path destinationPath) { return streamingOutputOperation(StreamingOutputOperationRequest.builder().applyMutation(streamingOutputOperationRequest) - .build(), destinationPath); + .build(), destinationPath); } /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 934b2724eaf5..d5dd67826475 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -1,5 +1,7 @@ package software.amazon.awssdk.services.json; +import java.util.Collections; +import java.util.List; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -8,16 +10,21 @@ import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.client.handler.SyncClientHandler; import 
software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.core.util.VersionInfo; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; @@ -101,24 +108,33 @@ public final String serviceName() { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { String hostPrefix = "{StringMember}-foo."; Validate.paramNotBlank(aPostOperationRequest.stringMember(), "StringMember"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); + APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation") - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - 
.hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); + + return clientHandler.execute(new ClientExecutionParams() + .withOperationName("APostOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -143,23 +159,32 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); + operationMetadata, APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput") - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -184,23 +209,32 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = 
protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); + operationMetadata, GetWithoutRequiredMembersResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers") - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(getWithoutRequiredMembersRequest) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + getWithoutRequiredMembersRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -221,23 +255,32 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { 
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey") - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(paginatedOperationWithResultKeyRequest) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -259,7 +302,7 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( * The following are few ways to iterate through the response pages: *

* 1) Using a Stream - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
@@ -268,7 +311,7 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey(
      * 
* * 2) Using For loop - * + * *
      * {
      *     @code
@@ -281,7 +324,7 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey(
      * 
* * 3) Use iterator directly - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
@@ -289,6 +332,10 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey(
      * }
      * 
*

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -309,8 +356,8 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { return new PaginatedOperationWithResultKeyIterable(this, applyPaginatorUserAgent(paginatedOperationWithResultKeyRequest)); } @@ -332,23 +379,32 @@ public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPa */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey") - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(paginatedOperationWithoutResultKeyRequest) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -370,7 +426,7 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul * The following are few ways to iterate through the response pages: *

* 1) Using a Stream - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
@@ -379,7 +435,7 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul
      * 
* * 2) Using For loop - * + * *
      * {
      *     @code
@@ -392,7 +448,7 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul
      * 
* * 3) Use iterator directly - * + * *
      * {@code
      * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
@@ -400,6 +456,10 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul
      * }
      * 
*

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -420,10 +480,10 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { return new PaginatedOperationWithoutResultKeyIterable(this, - applyPaginatorUserAgent(paginatedOperationWithoutResultKeyRequest)); + applyPaginatorUserAgent(paginatedOperationWithoutResultKeyRequest)); } /** @@ -434,11 +494,11 @@ public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResul * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *

      * {@code RequestBody.fromFile(new File("myfile.txt"))}
      * 
- * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @return Result of the StreamingInputOperation operation returned by the service. @@ -455,26 +515,37 @@ public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResul */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); + operationMetadata, StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); + + return clientHandler + .execute(new ClientExecutionParams() + 
.withOperationName("StreamingInputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -485,20 +556,20 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
      * {@code RequestBody.fromFile(new File("myfile.txt"))}
      * 
- * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. - * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this - * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The - * service documentation for the response content is as follows 'This be a stream'. + * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as + * parameters to the callback. The callback may return a transformed type which will be the return value of + * this method. See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing + * this interface and for links to pre-canned implementations for common scenarios like downloading to a + * file. The service documentation for the response content is as follows 'This be a stream'. * @return The transformed result of the ResponseTransformer. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). 
Can be used for @@ -513,31 +584,43 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); + operationMetadata, StreamingInputOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOutputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + 
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); + + return clientHandler.execute( + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -546,8 +629,8 @@ public ReturnT streamingInputOutputOperation( * @param streamingOutputOperationRequest * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters - * to the callback. The callback may return a transformed type which will be the return value of this method. + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters to + * the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The * service documentation for the response content is as follows 'This be a stream'. 
@@ -565,39 +648,63 @@ public ReturnT streamingInputOutputOperation( */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); + operationMetadata, StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); - - return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation") - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(streamingOutputOperationRequest) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + operationMetadata); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); + + return clientHandler.execute( + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + } finally { + List metricPublishers = 
resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { + JsonOperationMetadata operationMetadata) { return protocolFactory.createErrorResponseHandler(operationMetadata); } private > T init(T builder) { return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + .clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON) + .protocolVersion("1.1") + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); } @Override @@ -607,10 +714,10 @@ public void close() { private T applyPaginatorUserAgent(T request) { Consumer userAgentApplier = b -> b.addApiName(ApiName.builder() - .version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); + 
.version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(userAgentApplier).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build())); + .map(c -> c.toBuilder().applyMutation(userAgentApplier).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -620,8 +727,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -630,3 +737,4 @@ public JsonUtilities utilities() { return JsonUtilities.create(param1, param2, param3); } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index ce2d67987cce..e820c2352766 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -354,6 +354,10 @@ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -426,6 +430,10 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -499,6 +507,10 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} * operation. @@ -630,6 +642,10 @@ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResu * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -703,6 +719,10 @@ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResu * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} * operation. @@ -887,7 +907,7 @@ default StreamingInputOperationResponse streamingInputOperation( * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters + * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as parameters * to the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The @@ -934,7 +954,7 @@ default ReturnT streamingInputOutputOperation( * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters + * StreamingInputOutputOperationResponse and an InputStream to the response content are provided as parameters * to the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. 
The @@ -1041,7 +1061,7 @@ default StreamingInputOutputOperationResponse streamingInputOutputOperation( * @param streamingOutputOperationRequest * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters * to the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. The @@ -1076,7 +1096,7 @@ default ReturnT streamingOutputOperation(StreamingOutputOperationReque * request. * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOutputOperationRequest and an InputStream to the response content are provided as parameters + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters * to the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. 
The diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index e6ddff92b7f4..17cda3c110f3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -2,20 +2,28 @@ import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; +import java.util.Collections; +import java.util.List; import java.util.concurrent.CompletableFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.client.handler.AwsAsyncClientHandler; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; 
import software.amazon.awssdk.services.query.model.APostOperationRequest; @@ -86,23 +94,36 @@ public final String serviceName() { */ @Override public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation") - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); - return executeFuture; + .execute(new ClientExecutionParams() + .withOperationName("APostOperation") + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = aPostOperationRequest.overrideConfiguration().orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return 
CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -132,22 +153,35 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput") - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(aPostOperationWithOutputRequest)); - return executeFuture; + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput") + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationWithOutputRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = 
aPostOperationWithOutputRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -178,25 +212,38 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - 
.withErrorResponseHandler(errorResponseHandler).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); - return executeFuture; + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withInput(streamingInputOperationRequest)); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingInputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); } catch (Throwable t) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -227,31 +274,43 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + 
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation") - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); - executeFuture.whenComplete((r, e) -> { + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation") + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); + AwsRequestOverrideConfiguration requestOverrideConfig = streamingOutputOperationRequest.overrideConfiguration() + .orElse(null); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseTransformer.exceptionOccurred(e)); + () -> asyncResponseTransformer.exceptionOccurred(e)); } + List metricPublishers = resolveMetricPublishers(clientConfiguration, requestOverrideConfig); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); }); - return executeFuture; + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, 
ignoring", - () -> asyncResponseTransformer.exceptionOccurred(t)); + () -> asyncResponseTransformer.exceptionOccurred(t)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } } @@ -263,10 +322,26 @@ public void close() { private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + } + + private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index f9c451e880b3..28cc96b95475 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java 
+++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -1,17 +1,24 @@ package software.amazon.awssdk.services.query; +import java.util.Collections; +import java.util.List; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; import software.amazon.awssdk.services.query.model.APostOperationRequest; @@ -76,19 +83,29 @@ public final String serviceName() { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryException { + AwsServiceException, SdkClientException, QueryException { String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; HttpResponseHandler responseHandler = protocolFactory - 
.createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) - .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + return clientHandler.execute(new ClientExecutionParams() + .withOperationName("APostOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).hostPrefixExpression(resolvedHostExpression) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -113,19 +130,29 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, QueryException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - 
.createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); - return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -136,11 +163,11 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *

      * {@code RequestBody.fromFile(new File("myfile.txt"))}
      * 
- * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @return Result of the StreamingInputOperation operation returned by the service. @@ -157,23 +184,34 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); - return clientHandler.execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + 
.withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } } /** @@ -182,7 +220,7 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe * @param streamingOutputOperationRequest * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled - * StreamingInputOperationRequest and an InputStream to the response content are provided as parameters to + * StreamingOutputOperationResponse and an InputStream to the response content are provided as parameters to * the callback. The callback may return a transformed type which will be the return value of this method. * See {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this * interface and for links to pre-canned implementations for common scenarios like downloading to a file. 
The @@ -201,28 +239,53 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, QueryException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + MetricCollector apiCallMetricCollector = MetricCollector.create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); - return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + return clientHandler.execute( + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + } finally { + List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest + .overrideConfiguration().orElse(null)); + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + + private static List 
resolveMetricPublishers(SdkClientConfiguration clientConfiguration, + RequestOverrideConfiguration requestOverrideConfiguration) { + List publishers = null; + if (requestOverrideConfiguration != null) { + publishers = requestOverrideConfiguration.metricPublishers(); + } + if (publishers == null || publishers.isEmpty()) { + publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); + } + if (publishers == null) { + publishers = Collections.emptyList(); + } + return publishers; } private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } @Override @@ -230,3 +293,4 @@ public void close() { clientHandler.close(); } } + diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-async-cache-loader.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-async-cache-loader.java index 2b88b7a1a629..892c6cc8b45d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-async-cache-loader.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-async-cache-loader.java @@ -2,6 +2,7 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.List; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.Generated; import 
software.amazon.awssdk.annotations.SdkInternalApi; @@ -9,6 +10,7 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryEndpoint; import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; import software.amazon.awssdk.services.endpointdiscoverytest.model.Endpoint; +import software.amazon.awssdk.utils.Validate; @SdkInternalApi @Generated("software.amazon.awssdk:codegen") @@ -26,14 +28,17 @@ public static EndpointDiscoveryTestAsyncEndpointDiscoveryCacheLoader create(Endp @Override public CompletableFuture discoverEndpoint(EndpointDiscoveryRequest endpointDiscoveryRequest) { return client.describeEndpoints( - software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest.builder().build()) - .thenApply( - r -> { - Endpoint endpoint = r.endpoints().get(0); - return EndpointDiscoveryEndpoint.builder() - .endpoint(toUri(endpoint.address(), endpointDiscoveryRequest.defaultEndpoint())) - .expirationTime(Instant.now().plus(endpoint.cachePeriodInMinutes(), ChronoUnit.MINUTES)) - .build(); - }); + software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest.builder().build()) + .thenApply( + r -> { + List endpoints = r.endpoints(); + Validate.notEmpty(endpoints, + "Endpoints returned by service for endpoint discovery must not be empty."); + Endpoint endpoint = endpoints.get(0); + return EndpointDiscoveryEndpoint.builder() + .endpoint(toUri(endpoint.address(), endpointDiscoveryRequest.defaultEndpoint())) + .expirationTime(Instant.now().plus(endpoint.cachePeriodInMinutes(), ChronoUnit.MINUTES)) + .build(); + }); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-sync-cache-loader.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-sync-cache-loader.java index 9873fead9e8d..8262c0f52699 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-sync-cache-loader.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/endpointdiscovery/test-sync-cache-loader.java @@ -2,6 +2,7 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.List; import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -10,6 +11,7 @@ import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryRequest; import software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsResponse; import software.amazon.awssdk.services.endpointdiscoverytest.model.Endpoint; +import software.amazon.awssdk.utils.Validate; @SdkInternalApi @Generated("software.amazon.awssdk:codegen") @@ -28,12 +30,14 @@ public static EndpointDiscoveryTestEndpointDiscoveryCacheLoader create(EndpointD public CompletableFuture discoverEndpoint(EndpointDiscoveryRequest endpointDiscoveryRequest) { return CompletableFuture.supplyAsync(() -> { DescribeEndpointsResponse response = client - .describeEndpoints(software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest - .builder().build()); - Endpoint endpoint = response.endpoints().get(0); + .describeEndpoints(software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest + .builder().build()); + List endpoints = response.endpoints(); + Validate.notEmpty(endpoints, "Endpoints returned by service for endpoint discovery must not be empty."); + Endpoint endpoint = endpoints.get(0); return EndpointDiscoveryEndpoint.builder() - .endpoint(toUri(endpoint.address(), endpointDiscoveryRequest.defaultEndpoint())) - .expirationTime(Instant.now().plus(endpoint.cachePeriodInMinutes(), ChronoUnit.MINUTES)).build(); + .endpoint(toUri(endpoint.address(), endpointDiscoveryRequest.defaultEndpoint())) + 
.expirationTime(Instant.now().plus(endpoint.cachePeriodInMinutes(), ChronoUnit.MINUTES)).build(); }); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java index 5837fbeee093..ee962fa72e23 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java @@ -2042,7 +2042,7 @@ public final void setSimpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); } - public final Collection getListOfEnumsAsStrings() { + public final Collection getListOfEnums() { return listOfEnums; } @@ -2072,7 +2072,7 @@ public final Builder listOfEnums(EnumType... listOfEnums) { return this; } - public final void setListOfEnumsWithStrings(Collection listOfEnums) { + public final void setListOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); } @@ -2127,7 +2127,7 @@ public final void setListOfStructs(Collection listOfSt this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); } - public final Collection> getListOfMapOfEnumToStringAsStrings() { + public final Collection> getListOfMapOfEnumToString() { return listOfMapOfEnumToString; } @@ -2144,7 +2144,7 @@ public final Builder listOfMapOfEnumToStringWithStrings(Map... 
l return this; } - public final void setListOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); } @@ -2191,7 +2191,7 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnumAsStrings() { + public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } @@ -2207,11 +2207,11 @@ public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { return this; } - public final void setMapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); } - public final Map getMapOfEnumToStringAsStrings() { + public final Map getMapOfEnumToString() { return mapOfEnumToString; } @@ -2227,11 +2227,11 @@ public final Builder mapOfEnumToString(Map mapOfEnumToString) return this; } - public final void setMapOfEnumToStringWithStrings(Map mapOfEnumToString) { + public final void setMapOfEnumToString(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); } - public final Map getMapOfStringToEnumAsStrings() { + public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } @@ -2247,11 +2247,11 @@ public final Builder mapOfStringToEnum(Map mapOfStringToEnum) return this; } - public final void setMapOfStringToEnumWithStrings(Map mapOfStringToEnum) { + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); } - public final Map getMapOfEnumToSimpleStructAsStrings() { + public final Map getMapOfEnumToSimpleStruct() { return mapOfEnumToSimpleStruct != null ? 
CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2268,11 +2268,11 @@ public final Builder mapOfEnumToSimpleStruct(Map mapOfEn return this; } - public final void setMapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); } - public final Map> getMapOfEnumToListOfEnumsAsStrings() { + public final Map> getMapOfEnumToListOfEnums() { return mapOfEnumToListOfEnums; } @@ -2288,11 +2288,11 @@ public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { + public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); } - public final Map> getMapOfEnumToMapOfStringToEnumAsStrings() { + public final Map> getMapOfEnumToMapOfStringToEnum() { return mapOfEnumToMapOfStringToEnum; } @@ -2308,7 +2308,7 @@ public final Builder mapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { + public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); } @@ -2450,7 +2450,7 @@ public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polym : null; } - public final String getEnumTypeAsString() { + public final String getEnumType() { return enumType; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java index 925fed5ebbb0..7fc9806cc78c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java @@ -2035,7 +2035,7 @@ 
public final void setSimpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); } - public final Collection getListOfEnumsAsStrings() { + public final Collection getListOfEnums() { return listOfEnums; } @@ -2065,7 +2065,7 @@ public final Builder listOfEnums(EnumType... listOfEnums) { return this; } - public final void setListOfEnumsWithStrings(Collection listOfEnums) { + public final void setListOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); } @@ -2120,7 +2120,7 @@ public final void setListOfStructs(Collection listOfSt this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); } - public final Collection> getListOfMapOfEnumToStringAsStrings() { + public final Collection> getListOfMapOfEnumToString() { return listOfMapOfEnumToString; } @@ -2137,7 +2137,7 @@ public final Builder listOfMapOfEnumToStringWithStrings(Map... l return this; } - public final void setListOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); } @@ -2184,7 +2184,7 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnumAsStrings() { + public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } @@ -2200,11 +2200,11 @@ public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { return this; } - public final void setMapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); } - public final Map getMapOfEnumToStringAsStrings() { + public final Map getMapOfEnumToString() { return mapOfEnumToString; } @@ -2220,11 +2220,11 @@ public final Builder mapOfEnumToString(Map mapOfEnumToString) return this; } - public final void setMapOfEnumToStringWithStrings(Map mapOfEnumToString) { 
+ public final void setMapOfEnumToString(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); } - public final Map getMapOfStringToEnumAsStrings() { + public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } @@ -2240,11 +2240,11 @@ public final Builder mapOfStringToEnum(Map mapOfStringToEnum) return this; } - public final void setMapOfStringToEnumWithStrings(Map mapOfStringToEnum) { + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); } - public final Map getMapOfEnumToSimpleStructAsStrings() { + public final Map getMapOfEnumToSimpleStruct() { return mapOfEnumToSimpleStruct != null ? CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2261,11 +2261,11 @@ public final Builder mapOfEnumToSimpleStruct(Map mapOfEn return this; } - public final void setMapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); } - public final Map> getMapOfEnumToListOfEnumsAsStrings() { + public final Map> getMapOfEnumToListOfEnums() { return mapOfEnumToListOfEnums; } @@ -2281,11 +2281,11 @@ public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { + public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); } - public final Map> getMapOfEnumToMapOfStringToEnumAsStrings() { + public final Map> getMapOfEnumToMapOfStringToEnum() { return mapOfEnumToMapOfStringToEnum; } @@ -2301,7 +2301,7 @@ public final Builder mapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { + public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = 
MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); } @@ -2443,7 +2443,7 @@ public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polym : null; } - public final String getEnumTypeAsString() { + public final String getEnumType() { return enumType; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstream.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstream.java index e059c6535ec4..9b87d17442ad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstream.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstream.java @@ -8,7 +8,7 @@ import software.amazon.awssdk.core.SdkPojo; /** - * Base interface for all event types of the EventStreamOperation API. + * Base interface for all event types in EventStream. */ @Generated("software.amazon.awssdk:codegen") @SdkPublicApi @@ -26,7 +26,7 @@ public List> sdkFields() { public void accept(EventStreamOperationResponseHandler.Visitor visitor) { visitor.visitDefault(this); } - };; + }; /** * Calls the appropriate visit method depending on the subtype of {@link EventStream}. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstream.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstream.java index 1f75f8174e50..7cdc67cb90ac 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstream.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstream.java @@ -4,7 +4,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; /** - * Base interface for all event types of the EventStreamOperation API. + * Base interface for all event types in InputEventStream. 
*/ @Generated("software.amazon.awssdk:codegen") @SdkPublicApi diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstreamtwo.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstreamtwo.java index c1f5b58c214d..c1dbfa8e9f48 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstreamtwo.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventstreamtwo.java @@ -4,7 +4,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; /** - * Base interface for all event types of the EventStreamOperationWithOnlyInput API. + * Base interface for all event types in InputEventStreamTwo. */ @Generated("software.amazon.awssdk:codegen") @SdkPublicApi diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesrequest.java index dc32c003d951..0fb19133f4c5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesrequest.java @@ -2040,7 +2040,7 @@ public final void setSimpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); } - public final Collection getListOfEnumsAsStrings() { + public final Collection getListOfEnums() { return listOfEnums; } @@ -2070,7 +2070,7 @@ public final Builder listOfEnums(EnumType... 
listOfEnums) { return this; } - public final void setListOfEnumsWithStrings(Collection listOfEnums) { + public final void setListOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); } @@ -2125,7 +2125,7 @@ public final void setListOfStructs(Collection listOfSt this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); } - public final Collection> getListOfMapOfEnumToStringAsStrings() { + public final Collection> getListOfMapOfEnumToString() { return listOfMapOfEnumToString; } @@ -2142,7 +2142,7 @@ public final Builder listOfMapOfEnumToStringWithStrings(Map... l return this; } - public final void setListOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); } @@ -2189,7 +2189,7 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnumAsStrings() { + public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } @@ -2205,11 +2205,11 @@ public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { return this; } - public final void setMapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); } - public final Map getMapOfEnumToStringAsStrings() { + public final Map getMapOfEnumToString() { return mapOfEnumToString; } @@ -2225,11 +2225,11 @@ public final Builder mapOfEnumToString(Map mapOfEnumToString) return this; } - public final void setMapOfEnumToStringWithStrings(Map mapOfEnumToString) { + public final void setMapOfEnumToString(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); } - public final Map getMapOfStringToEnumAsStrings() { + public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } @@ -2245,11 +2245,11 @@ public 
final Builder mapOfStringToEnum(Map mapOfStringToEnum) return this; } - public final void setMapOfStringToEnumWithStrings(Map mapOfStringToEnum) { + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); } - public final Map getMapOfEnumToSimpleStructAsStrings() { + public final Map getMapOfEnumToSimpleStruct() { return mapOfEnumToSimpleStruct != null ? CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2266,11 +2266,11 @@ public final Builder mapOfEnumToSimpleStruct(Map mapOfEn return this; } - public final void setMapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); } - public final Map> getMapOfEnumToListOfEnumsAsStrings() { + public final Map> getMapOfEnumToListOfEnums() { return mapOfEnumToListOfEnums; } @@ -2286,11 +2286,11 @@ public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { + public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); } - public final Map> getMapOfEnumToMapOfStringToEnumAsStrings() { + public final Map> getMapOfEnumToMapOfStringToEnum() { return mapOfEnumToMapOfStringToEnum; } @@ -2306,7 +2306,7 @@ public final Builder mapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { + public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); } @@ -2448,7 +2448,7 @@ public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polym : null; } - public final String getEnumTypeAsString() { + public final String getEnumType() { return enumType; } diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesresponse.java index 159af2333b92..3bc80f9615b6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nonautoconstructcontainers/alltypesresponse.java @@ -2033,7 +2033,7 @@ public final void setSimpleList(Collection simpleList) { this.simpleList = ListOfStringsCopier.copy(simpleList); } - public final Collection getListOfEnumsAsStrings() { + public final Collection getListOfEnums() { return listOfEnums; } @@ -2063,7 +2063,7 @@ public final Builder listOfEnums(EnumType... listOfEnums) { return this; } - public final void setListOfEnumsWithStrings(Collection listOfEnums) { + public final void setListOfEnums(Collection listOfEnums) { this.listOfEnums = ListOfEnumsCopier.copy(listOfEnums); } @@ -2118,7 +2118,7 @@ public final void setListOfStructs(Collection listOfSt this.listOfStructs = ListOfSimpleStructsCopier.copyFromBuilder(listOfStructs); } - public final Collection> getListOfMapOfEnumToStringAsStrings() { + public final Collection> getListOfMapOfEnumToString() { return listOfMapOfEnumToString; } @@ -2135,7 +2135,7 @@ public final Builder listOfMapOfEnumToStringWithStrings(Map... 
l return this; } - public final void setListOfMapOfEnumToStringWithStrings(Collection> listOfMapOfEnumToString) { + public final void setListOfMapOfEnumToString(Collection> listOfMapOfEnumToString) { this.listOfMapOfEnumToString = ListOfMapOfEnumToStringCopier.copy(listOfMapOfEnumToString); } @@ -2182,7 +2182,7 @@ public final void setMapOfStringToSimpleStruct(Map getMapOfEnumToEnumAsStrings() { + public final Map getMapOfEnumToEnum() { return mapOfEnumToEnum; } @@ -2198,11 +2198,11 @@ public final Builder mapOfEnumToEnum(Map mapOfEnumToEnum) { return this; } - public final void setMapOfEnumToEnumWithStrings(Map mapOfEnumToEnum) { + public final void setMapOfEnumToEnum(Map mapOfEnumToEnum) { this.mapOfEnumToEnum = MapOfEnumToEnumCopier.copy(mapOfEnumToEnum); } - public final Map getMapOfEnumToStringAsStrings() { + public final Map getMapOfEnumToString() { return mapOfEnumToString; } @@ -2218,11 +2218,11 @@ public final Builder mapOfEnumToString(Map mapOfEnumToString) return this; } - public final void setMapOfEnumToStringWithStrings(Map mapOfEnumToString) { + public final void setMapOfEnumToString(Map mapOfEnumToString) { this.mapOfEnumToString = MapOfEnumToStringCopier.copy(mapOfEnumToString); } - public final Map getMapOfStringToEnumAsStrings() { + public final Map getMapOfStringToEnum() { return mapOfStringToEnum; } @@ -2238,11 +2238,11 @@ public final Builder mapOfStringToEnum(Map mapOfStringToEnum) return this; } - public final void setMapOfStringToEnumWithStrings(Map mapOfStringToEnum) { + public final void setMapOfStringToEnum(Map mapOfStringToEnum) { this.mapOfStringToEnum = MapOfStringToEnumCopier.copy(mapOfStringToEnum); } - public final Map getMapOfEnumToSimpleStructAsStrings() { + public final Map getMapOfEnumToSimpleStruct() { return mapOfEnumToSimpleStruct != null ? 
CollectionUtils.mapValues(mapOfEnumToSimpleStruct, SimpleStruct::toBuilder) : null; } @@ -2259,11 +2259,11 @@ public final Builder mapOfEnumToSimpleStruct(Map mapOfEn return this; } - public final void setMapOfEnumToSimpleStructWithStrings(Map mapOfEnumToSimpleStruct) { + public final void setMapOfEnumToSimpleStruct(Map mapOfEnumToSimpleStruct) { this.mapOfEnumToSimpleStruct = MapOfEnumToSimpleStructCopier.copyFromBuilder(mapOfEnumToSimpleStruct); } - public final Map> getMapOfEnumToListOfEnumsAsStrings() { + public final Map> getMapOfEnumToListOfEnums() { return mapOfEnumToListOfEnums; } @@ -2279,11 +2279,11 @@ public final Builder mapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { + public final void setMapOfEnumToListOfEnums(Map> mapOfEnumToListOfEnums) { this.mapOfEnumToListOfEnums = MapOfEnumToListOfEnumsCopier.copy(mapOfEnumToListOfEnums); } - public final Map> getMapOfEnumToMapOfStringToEnumAsStrings() { + public final Map> getMapOfEnumToMapOfStringToEnum() { return mapOfEnumToMapOfStringToEnum; } @@ -2299,7 +2299,7 @@ public final Builder mapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { + public final void setMapOfEnumToMapOfStringToEnum(Map> mapOfEnumToMapOfStringToEnum) { this.mapOfEnumToMapOfStringToEnum = MapOfEnumToMapOfStringToEnumCopier.copy(mapOfEnumToMapOfStringToEnum); } @@ -2441,7 +2441,7 @@ public final void setPolymorphicTypeWithoutSubTypes(SubTypeOne.BuilderImpl polym : null; } - public final String getEnumTypeAsString() { + public final String getEnumType() { return enumType; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/eventstream.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/eventstream.java new file mode 100644 index 000000000000..23addb6f0935 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/eventstream.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; + +/** + * Base interface for all event types in EventStream. + */ +@Generated("software.amazon.awssdk:codegen") +@SdkPublicApi +public interface EventStream extends SdkPojo { + /** + * Special type of {@link EventStream} for unknown types of events that this version of the SDK does not know about + */ + EventStream UNKNOWN = new EventStream() { + @Override + public List> sdkFields() { + return Collections.emptyList(); + } + + @Override + public void accept(StreamBirthsResponseHandler.Visitor visitor) { + visitor.visitDefault(this); + } + + @Override + public void accept(StreamDeathsResponseHandler.Visitor visitor) { + visitor.visitDefault(this); + } + }; + + /** + * Calls the appropriate visit method depending on the subtype of {@link EventStream}. + * + * @param visitor Visitor to invoke. + */ + void accept(StreamBirthsResponseHandler.Visitor visitor); + + /** + * Calls the appropriate visit method depending on the subtype of {@link EventStream}. + * + * @param visitor Visitor to invoke. 
+ */ + void accept(StreamDeathsResponseHandler.Visitor visitor); +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java new file mode 100644 index 000000000000..06dbe39a1231 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java @@ -0,0 +1,120 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +@Generated("software.amazon.awssdk:codegen") +public final class GetRandomPersonRequest extends SharedEventStreamRequest implements + ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private GetRandomPersonRequest(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + 
return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof GetRandomPersonRequest)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be + * redacted from this string using a placeholder value. + */ + @Override + public String toString() { + return ToString.builder("GetRandomPersonRequest").build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { + @Override + Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); + + @Override + Builder overrideConfiguration(Consumer builderConsumer); + } + + static final class BuilderImpl extends SharedEventStreamRequest.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(GetRandomPersonRequest model) { + super(model); + } + + @Override + public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { + super.overrideConfiguration(overrideConfiguration); + return this; + } + + @Override + public Builder overrideConfiguration(Consumer builderConsumer) { + super.overrideConfiguration(builderConsumer); + return this; + } + + @Override + public GetRandomPersonRequest build() { + return new GetRandomPersonRequest(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java new file mode 100644 index 000000000000..7832a4d483be --- /dev/null +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java @@ -0,0 +1,212 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.core.protocol.MarshallingType; +import software.amazon.awssdk.core.traits.LocationTrait; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class GetRandomPersonResponse extends SharedEventStreamResponse implements + ToCopyableBuilder { + private static final SdkField NAME_FIELD = SdkField. builder(MarshallingType.STRING) + .getter(getter(GetRandomPersonResponse::name)).setter(setter(Builder::name)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Name").build()).build(); + + private static final SdkField BIRTHDAY_FIELD = SdkField. 
builder(MarshallingType.INSTANT) + .getter(getter(GetRandomPersonResponse::birthday)).setter(setter(Builder::birthday)) + .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Birthday").build()).build(); + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NAME_FIELD, BIRTHDAY_FIELD)); + + private final String name; + + private final Instant birthday; + + private GetRandomPersonResponse(BuilderImpl builder) { + super(builder); + this.name = builder.name; + this.birthday = builder.birthday; + } + + /** + * Returns the value of the Name property for this object. + * + * @return The value of the Name property for this object. + */ + public String name() { + return name; + } + + /** + * Returns the value of the Birthday property for this object. + * + * @return The value of the Birthday property for this object. + */ + public Instant birthday() { + return birthday; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + hashCode = 31 * hashCode + Objects.hashCode(name()); + hashCode = 31 * hashCode + Objects.hashCode(birthday()); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof GetRandomPersonResponse)) { + return false; + } + GetRandomPersonResponse other = (GetRandomPersonResponse) obj; + return Objects.equals(name(), other.name()) && Objects.equals(birthday(), other.birthday()); + } + + /** + * Returns a string representation of this object. 
This is useful for testing and debugging. Sensitive data will be + * redacted from this string using a placeholder value. + */ + @Override + public String toString() { + return ToString.builder("GetRandomPersonResponse").add("Name", name()).add("Birthday", birthday()).build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + switch (fieldName) { + case "Name": + return Optional.ofNullable(clazz.cast(name())); + case "Birthday": + return Optional.ofNullable(clazz.cast(birthday())); + default: + return Optional.empty(); + } + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + private static Function getter(Function g) { + return obj -> g.apply((GetRandomPersonResponse) obj); + } + + private static BiConsumer setter(BiConsumer s) { + return (obj, val) -> s.accept((Builder) obj, val); + } + + public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, + CopyableBuilder { + /** + * Sets the value of the Name property for this object. + * + * @param name + * The new value for the Name property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder name(String name); + + /** + * Sets the value of the Birthday property for this object. + * + * @param birthday + * The new value for the Birthday property for this object. + * @return Returns a reference to this object so that method calls can be chained together. 
+ */ + Builder birthday(Instant birthday); + } + + static final class BuilderImpl extends SharedEventStreamResponse.BuilderImpl implements Builder { + private String name; + + private Instant birthday; + + private BuilderImpl() { + } + + private BuilderImpl(GetRandomPersonResponse model) { + super(model); + name(model.name); + birthday(model.birthday); + } + + public final String getName() { + return name; + } + + @Override + public final Builder name(String name) { + this.name = name; + return this; + } + + public final void setName(String name) { + this.name = name; + } + + public final Instant getBirthday() { + return birthday; + } + + @Override + public final Builder birthday(Instant birthday) { + this.birthday = birthday; + return this; + } + + public final void setBirthday(Instant birthday) { + this.birthday = birthday; + } + + @Override + public GetRandomPersonResponse build() { + return new GetRandomPersonResponse(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java new file mode 100644 index 000000000000..a3657cccc8a1 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java @@ -0,0 +1,250 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.io.Serializable; +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.core.protocol.MarshallLocation; +import software.amazon.awssdk.core.protocol.MarshallingType; +import software.amazon.awssdk.core.traits.LocationTrait; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class Person implements SdkPojo, Serializable, ToCopyableBuilder, EventStream { + private static final SdkField NAME_FIELD = SdkField.builder(MarshallingType.STRING) + .getter(getter(Person::name)) + .setter(setter(Builder::name)) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("Name") + .build()).build(); + + private static final SdkField BIRTHDAY_FIELD = SdkField.builder(MarshallingType.INSTANT) + .getter(getter(Person::birthday)) + .setter(setter(Builder::birthday)) + .traits(LocationTrait.builder() + .location(MarshallLocation.PAYLOAD) + .locationName("Birthday") + .build()).build(); + + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(NAME_FIELD, BIRTHDAY_FIELD)); + + private static final long serialVersionUID = 1L; + + private final String name; + + private final Instant birthday; + + private Person(BuilderImpl builder) { + this.name = builder.name; + this.birthday = builder.birthday; + } + + /** + * Returns the value of the Name property for this object. 
+ * + * @return The value of the Name property for this object. + */ + public String name() { + return name; + } + + /** + * Returns the value of the Birthday property for this object. + * + * @return The value of the Birthday property for this object. + */ + public Instant birthday() { + return birthday; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + Objects.hashCode(name()); + hashCode = 31 * hashCode + Objects.hashCode(birthday()); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof Person)) { + return false; + } + Person other = (Person) obj; + return Objects.equals(name(), other.name()) && Objects.equals(birthday(), other.birthday()); + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be redacted from this string using a placeholder value. 
+ */ + @Override + public String toString() { + return ToString.builder("Person").add("Name", name()).add("Birthday", birthday()).build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + switch (fieldName) { + case "Name": + return Optional.ofNullable(clazz.cast(name())); + case "Birthday": + return Optional.ofNullable(clazz.cast(birthday())); + default: + return Optional.empty(); + } + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + private static Function getter(Function g) { + return obj -> g.apply((Person) obj); + } + + private static BiConsumer setter(BiConsumer s) { + return (obj, val) -> s.accept((Builder) obj, val); + } + + /** + * Calls the appropriate visit method depending on the subtype of {@link Person}. + * + * @param visitor Visitor to invoke. + */ + @Override + public void accept(StreamBirthsResponseHandler.Visitor visitor) { + visitor.visit(this); + } + + /** + * Calls the appropriate visit method depending on the subtype of {@link Person}. + * + * @param visitor Visitor to invoke. + */ + @Override + public void accept(StreamDeathsResponseHandler.Visitor visitor) { + visitor.visit(this); + } + + public interface Builder extends SdkPojo, CopyableBuilder { + /** + * Sets the value of the Name property for this object. + * + * @param name The new value for the Name property for this object. + * @return Returns a reference to this object so that method calls can be chained together. + */ + Builder name(String name); + + /** + * Sets the value of the Birthday property for this object. + * + * @param birthday The new value for the Birthday property for this object. + * @return Returns a reference to this object so that method calls can be chained together. 
+ */ + Builder birthday(Instant birthday); + } + + static final class BuilderImpl implements Builder { + private String name; + + private Instant birthday; + + private BuilderImpl() { + } + + private BuilderImpl(Person model) { + name(model.name); + birthday(model.birthday); + } + + public final String getName() { + return name; + } + + @Override + public final Builder name(String name) { + this.name = name; + return this; + } + + public final void setName(String name) { + this.name = name; + } + + public final Instant getBirthday() { + return birthday; + } + + @Override + public final Builder birthday(Instant birthday) { + this.birthday = birthday; + return this; + } + + public final void setBirthday(Instant birthday) { + this.birthday = birthday; + } + + @Override + public Person build() { + return new Person(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/service-2.json new file mode 100644 index 000000000000..4fa3c6714366 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/service-2.json @@ -0,0 +1,85 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "shared-event-stream-service", + "globalEndpoint": "shared-event-stream.amazonaws.com", + "protocol": "rest-json", + "serviceAbbreviation": "Shared Event Stream Service", + "serviceFullName": "Service that shares event streams", + "serviceId":"Shared Event Stream Service", + "signatureVersion": "v4", + "uid": "shared-event-stream-service-2010-05-08", + "xmlNamespace": "https://shared-event-stream-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "StreamBirths" : { + "name": "StreamBirths", + "http": { + "method": "GET", + "requestUri": "/births" + }, + "output": { + "shape": 
"PeopleOutput" + } + }, + "StreamDeaths" : { + "name": "StreamDeaths", + "http": { + "method": "GET", + "requestUri": "/deaths" + }, + "output": { + "shape": "PeopleOutput" + } + }, + "GetRandomPerson" : { + "name" : "GetRandomPerson", + "http": { + "method": "GET", + "requestUri": "/randomPerson" + }, + "output": { + "shape": "Person" + } + } + }, + "shapes": { + "dateType": { + "type": "timestamp" + }, + "String": { + "type": "string" + }, + "PeopleOutput": { + "type": "structure", + "members": { + "EventStream": { + "shape": "EventStream" + } + } + }, + "EventStream": { + "type": "structure", + "members": { + "Person": { + "shape": "Person" + } + }, + "eventstream": true + }, + "Person": { + "type": "structure", + "members": { + "Name": { + "shape": "String" + }, + "Birthday": { + "shape": "dateType" + } + }, + "event": true + } + }, + "documentation": "A service that streams births and deaths" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java new file mode 100644 index 000000000000..ccd9c204863d --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java @@ -0,0 +1,120 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +@Generated("software.amazon.awssdk:codegen") 
+public final class StreamBirthsRequest extends SharedEventStreamRequest implements ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private StreamBirthsRequest(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StreamBirthsRequest)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be redacted from this string using a placeholder value. 
+ */ + @Override + public String toString() { + return ToString.builder("StreamBirthsRequest").build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { + @Override + Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); + + @Override + Builder overrideConfiguration( + Consumer builderConsumer); + } + + static final class BuilderImpl extends SharedEventStreamRequest.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(StreamBirthsRequest model) { + super(model); + } + + @Override + public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { + super.overrideConfiguration(overrideConfiguration); + return this; + } + + @Override + public Builder overrideConfiguration( + Consumer builderConsumer) { + super.overrideConfiguration(builderConsumer); + return this; + } + + @Override + public StreamBirthsRequest build() { + return new StreamBirthsRequest(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java new file mode 100644 index 000000000000..d4eff52c167d --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java @@ -0,0 +1,101 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import 
software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class StreamBirthsResponse extends SharedEventStreamResponse implements ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private StreamBirthsResponse(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StreamBirthsResponse)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be redacted from this string using a placeholder value. 
+ */ + @Override + public String toString() { + return ToString.builder("StreamBirthsResponse").build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, CopyableBuilder { + } + + static final class BuilderImpl extends SharedEventStreamResponse.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(StreamBirthsResponse model) { + super(model); + } + + @Override + public StreamBirthsResponse build() { + return new StreamBirthsResponse(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java new file mode 100644 index 000000000000..78d42c6bf9a9 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java @@ -0,0 +1,120 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +@Generated("software.amazon.awssdk:codegen") +public final class StreamDeathsRequest extends SharedEventStreamRequest implements ToCopyableBuilder { + private static final List> SDK_FIELDS = 
Collections.unmodifiableList(Arrays.asList()); + + private StreamDeathsRequest(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StreamDeathsRequest)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be redacted from this string using a placeholder value. 
+ */ + @Override + public String toString() { + return ToString.builder("StreamDeathsRequest").build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { + @Override + Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); + + @Override + Builder overrideConfiguration( + Consumer builderConsumer); + } + + static final class BuilderImpl extends SharedEventStreamRequest.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(StreamDeathsRequest model) { + super(model); + } + + @Override + public Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration) { + super.overrideConfiguration(overrideConfiguration); + return this; + } + + @Override + public Builder overrideConfiguration( + Consumer builderConsumer) { + super.overrideConfiguration(builderConsumer); + return this; + } + + @Override + public StreamDeathsRequest build() { + return new StreamDeathsRequest(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java new file mode 100644 index 000000000000..c52a82f648bf --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java @@ -0,0 +1,101 @@ +package software.amazon.awssdk.services.sharedeventstream.model; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.core.SdkField; +import 
software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + */ +@Generated("software.amazon.awssdk:codegen") +public final class StreamDeathsResponse extends SharedEventStreamResponse implements ToCopyableBuilder { + private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList()); + + private StreamDeathsResponse(BuilderImpl builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static Class serializableBuilderClass() { + return BuilderImpl.class; + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + super.hashCode(); + return hashCode; + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && equalsBySdkFields(obj); + } + + @Override + public boolean equalsBySdkFields(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof StreamDeathsResponse)) { + return false; + } + return true; + } + + /** + * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be redacted from this string using a placeholder value. 
+ */ + @Override + public String toString() { + return ToString.builder("StreamDeathsResponse").build(); + } + + public Optional getValueForField(String fieldName, Class clazz) { + return Optional.empty(); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + + public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, CopyableBuilder { + } + + static final class BuilderImpl extends SharedEventStreamResponse.BuilderImpl implements Builder { + private BuilderImpl() { + } + + private BuilderImpl(StreamDeathsResponse model) { + super(model); + } + + @Override + public StreamDeathsResponse build() { + return new StreamDeathsResponse(this); + } + + @Override + public List> sdkFields() { + return SDK_FIELDS; + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyIterable.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyIterable.java index 2123bbb49de9..fcf33f2186d3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyIterable.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyIterable.java @@ -63,6 +63,10 @@ * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.jsonprotocoltests.model.PaginatedOperationWithResultKeyRequest)} * operation. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyPublisher.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyPublisher.java index 123dca18230f..056794469dcb 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyPublisher.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithResultKeyPublisher.java @@ -63,6 +63,10 @@ * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.jsonprotocoltests.model.PaginatedOperationWithResultKeyRequest)} * operation. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyIterable.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyIterable.java index 5d7c39fad36c..7e8265bf83f3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyIterable.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyIterable.java @@ -59,6 +59,10 @@ * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.jsonprotocoltests.model.PaginatedOperationWithoutResultKeyRequest)} * operation. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyPublisher.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyPublisher.java index ed5b3c380841..b940dcb7e87f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyPublisher.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/PaginatedOperationWithoutResultKeyPublisher.java @@ -58,6 +58,10 @@ * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.jsonprotocoltests.model.PaginatedOperationWithoutResultKeyRequest)} * operation. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiIterable.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiIterable.java index fa2738b9e8cb..bb35824f1cda 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiIterable.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiIterable.java @@ -63,6 +63,10 @@ * } * *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #sameTokenPaginationApi(software.amazon.awssdk.services.jsonprotocoltests.model.SameTokenPaginationApiRequest)} * operation. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiPublisher.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiPublisher.java index 52c70a8eb3bb..921ba3cfdd96 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiPublisher.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customizations/SameTokenPaginationApiPublisher.java @@ -63,6 +63,10 @@ * * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the paginator. + * It only limits the number of results in each page. + *

+ *

* Note: If you prefer to have control on service calls, use the * {@link #sameTokenPaginationApi(software.amazon.awssdk.services.jsonprotocoltests.model.SameTokenPaginationApiRequest)} * operation. diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 4f780db92091..dd0104f7dd98 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java new file mode 100644 index 000000000000..7fb713f735f8 --- /dev/null +++ b/core/annotations/src/main/java/software/amazon/awssdk/annotations/SdkPreviewApi.java @@ -0,0 +1,30 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Marker interface for preview and experimental APIs. Breaking changes may be + * introduced to elements marked as {@link SdkPreviewApi}. Users of the SDK + * should assume that anything annotated as preview will change or break, and + * should not use them in production. 
+ */ +@Target({ElementType.PACKAGE, ElementType.TYPE, ElementType.FIELD, ElementType.CONSTRUCTOR, ElementType.METHOD}) +@SdkProtectedApi +public @interface SdkPreviewApi { +} diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 6e6fe87d9081..25eb4a7c159f 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/auth/pom.xml b/core/auth/pom.xml index f6b540aaa62d..b16d8b465608 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java index 794526712339..33b0fb03c3e3 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/AwsBasicCredentials.java @@ -49,7 +49,7 @@ public final class AwsBasicCredentials implements AwsCredentials { private final String secretAccessKey; /** - * Constructs a new credentials object, with the specified AWS access key, AWS secret key and AWS session token. + * Constructs a new credentials object, with the specified AWS access key and AWS secret key. * * @param accessKeyId The AWS access key, used to identify the user interacting with AWS. * @param secretAccessKey The AWS secret access key, used to authenticate the user interacting with AWS. @@ -69,7 +69,7 @@ private AwsBasicCredentials(String accessKeyId, String secretAccessKey, boolean } /** - * Constructs a new credentials object, with the specified AWS access key, AWS secret key and AWS session token. + * Constructs a new credentials object, with the specified AWS access key and AWS secret key. * * @param accessKeyId The AWS access key, used to identify the user interacting with AWS. 
* @param secretAccessKey The AWS secret access key, used to authenticate the user interacting with AWS. diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java index 0ae639f517dd..0c4632c40646 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/AbstractAws4Signer.java @@ -24,6 +24,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.TreeMap; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; @@ -75,7 +76,14 @@ protected SdkHttpFullRequest.Builder doSign(SdkHttpFullRequest request, .filter(h -> h.equals("required")) .ifPresent(h -> mutableRequest.putHeader(SignerConstant.X_AMZ_CONTENT_SHA256, contentSha256)); - String canonicalRequest = createCanonicalRequest(mutableRequest, contentSha256, signingParams.doubleUrlEncode()); + Map> canonicalHeaders = canonicalizeSigningHeaders(mutableRequest.headers()); + String signedHeadersString = getSignedHeadersString(canonicalHeaders); + + String canonicalRequest = createCanonicalRequest(mutableRequest, + canonicalHeaders, + signedHeadersString, + contentSha256, + signingParams.doubleUrlEncode()); String stringToSign = createStringToSign(canonicalRequest, requestParams); @@ -84,7 +92,7 @@ protected SdkHttpFullRequest.Builder doSign(SdkHttpFullRequest request, byte[] signature = computeSignature(stringToSign, signingKey); mutableRequest.putHeader(SignerConstant.AUTHORIZATION, - buildAuthorizationHeader(signature, sanitizedCredentials, requestParams, mutableRequest)); + buildAuthorizationHeader(signature, sanitizedCredentials, requestParams, signedHeadersString)); 
processRequestPayload(mutableRequest, signature, signingKey, requestParams, signingParams); @@ -110,11 +118,16 @@ protected SdkHttpFullRequest.Builder doPresign(SdkHttpFullRequest request, } // Add the important parameters for v4 signing - addPreSignInformationToRequest(mutableRequest, sanitizedCredentials, requestParams, expirationInSeconds); + Map> canonicalizedHeaders = canonicalizeSigningHeaders(mutableRequest.headers()); + String signedHeadersString = getSignedHeadersString(canonicalizedHeaders); + + addPreSignInformationToRequest(mutableRequest, signedHeadersString, sanitizedCredentials, + requestParams, expirationInSeconds); String contentSha256 = calculateContentHashPresign(mutableRequest, signingParams); - String canonicalRequest = createCanonicalRequest(mutableRequest, contentSha256, signingParams.doubleUrlEncode()); + String canonicalRequest = createCanonicalRequest(mutableRequest, canonicalizedHeaders, signedHeadersString, + contentSha256, signingParams.doubleUrlEncode()); String stringToSign = createStringToSign(canonicalRequest, requestParams); @@ -191,9 +204,10 @@ protected final byte[] deriveSigningKey(AwsCredentials credentials, Instant sign * generate the canonical request. 
*/ private String createCanonicalRequest(SdkHttpFullRequest.Builder request, + Map> canonicalHeaders, + String signedHeadersString, String contentSha256, boolean doubleUrlEncode) { - String canonicalRequest = request.method().toString() + SignerConstant.LINE_SEPARATOR + // This would optionally double url-encode the resource path @@ -201,9 +215,9 @@ private String createCanonicalRequest(SdkHttpFullRequest.Builder request, SignerConstant.LINE_SEPARATOR + getCanonicalizedQueryString(request.rawQueryParameters()) + SignerConstant.LINE_SEPARATOR + - getCanonicalizedHeaderString(request.headers()) + + getCanonicalizedHeaderString(canonicalHeaders) + SignerConstant.LINE_SEPARATOR + - getSignedHeadersString(request.headers()) + + signedHeadersString + SignerConstant.LINE_SEPARATOR + contentSha256; @@ -254,12 +268,11 @@ private byte[] computeSignature(String stringToSign, byte[] signingKey) { private String buildAuthorizationHeader(byte[] signature, AwsCredentials credentials, Aws4SignerRequestParams signerParams, - SdkHttpFullRequest.Builder mutableRequest) { + String signedHeadersString) { String signingCredentials = credentials.accessKeyId() + "/" + signerParams.getScope(); String credential = "Credential=" + signingCredentials; - String signerHeaders = "SignedHeaders=" + - getSignedHeadersString(mutableRequest.headers()); + String signerHeaders = "SignedHeaders=" + signedHeadersString; String signatureHeader = "Signature=" + BinaryUtils.toHex(signature); return SignerConstant.AWS4_SIGNING_ALGORITHM + " " + credential + ", " + signerHeaders + ", " + signatureHeader; @@ -269,6 +282,7 @@ private String buildAuthorizationHeader(byte[] signature, * Includes all the signing headers as request parameters for pre-signing. 
*/ private void addPreSignInformationToRequest(SdkHttpFullRequest.Builder mutableRequest, + String signedHeadersString, AwsCredentials sanitizedCredentials, Aws4SignerRequestParams signerParams, long expirationInSeconds) { @@ -277,34 +291,39 @@ private void addPreSignInformationToRequest(SdkHttpFullRequest.Builder mutableRe mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_ALGORITHM, SignerConstant.AWS4_SIGNING_ALGORITHM); mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_DATE, signerParams.getFormattedRequestSigningDateTime()); - mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_SIGNED_HEADER, - getSignedHeadersString(mutableRequest.headers())); - mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_EXPIRES, - Long.toString(expirationInSeconds)); + mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_SIGNED_HEADER, signedHeadersString); + mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_EXPIRES, Long.toString(expirationInSeconds)); mutableRequest.putRawQueryParameter(SignerConstant.X_AMZ_CREDENTIAL, signingCredentials); } + private Map> canonicalizeSigningHeaders(Map> headers) { + Map> result = new TreeMap<>(); - private String getCanonicalizedHeaderString(Map> headers) { - List sortedHeaders = new ArrayList<>(headers.keySet()); - sortedHeaders.sort(String.CASE_INSENSITIVE_ORDER); - - StringBuilder buffer = new StringBuilder(); - for (String header : sortedHeaders) { - if (shouldExcludeHeaderFromSigning(header)) { + for (Map.Entry> header : headers.entrySet()) { + String lowerCaseHeader = lowerCase(header.getKey()); + if (LIST_OF_HEADERS_TO_IGNORE_IN_LOWER_CASE.contains(lowerCaseHeader)) { continue; } - String key = lowerCase(header); - for (String headerValue : headers.get(header)) { - appendCompactedString(buffer, key); + result.computeIfAbsent(lowerCaseHeader, x -> new ArrayList<>()).addAll(header.getValue()); + } + + return result; + } + + private String getCanonicalizedHeaderString(Map> canonicalizedHeaders) { + 
StringBuilder buffer = new StringBuilder(); + + canonicalizedHeaders.forEach((headerName, headerValues) -> { + for (String headerValue : headerValues) { + appendCompactedString(buffer, headerName); buffer.append(":"); if (headerValue != null) { appendCompactedString(buffer, headerValue); } buffer.append("\n"); } - } + }); return buffer.toString(); } @@ -350,28 +369,17 @@ private boolean isWhiteSpace(final char ch) { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\u000b' || ch == '\r' || ch == '\f'; } - private String getSignedHeadersString(Map> headers) { - List sortedHeaders = new ArrayList<>(headers.keySet()); - sortedHeaders.sort(String.CASE_INSENSITIVE_ORDER); - + private String getSignedHeadersString(Map> canonicalizedHeaders) { StringBuilder buffer = new StringBuilder(); - for (String header : sortedHeaders) { - if (shouldExcludeHeaderFromSigning(header)) { - continue; - } + for (String header : canonicalizedHeaders.keySet()) { if (buffer.length() > 0) { buffer.append(";"); } - buffer.append(lowerCase(header)); + buffer.append(header); } - return buffer.toString(); } - private boolean shouldExcludeHeaderFromSigning(String header) { - return LIST_OF_HEADERS_TO_IGNORE_IN_LOWER_CASE.contains(lowerCase(header)); - } - private void addHostHeader(SdkHttpFullRequest.Builder mutableRequest) { // AWS4 requires that we sign the Host header so we // have to have it in the request by the time we sign. 
diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 8ae9bc39b4b7..a6f9e690b993 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT aws-core @@ -63,6 +63,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk utils diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java index eaaac5e52503..0147a7618e2b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/client/handler/AwsClientHandlerUtils.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.nio.ByteBuffer; +import java.time.Duration; import java.util.Map; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -42,8 +43,10 @@ import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.eventstream.HeaderValue; @@ -69,7 +72,11 @@ static ExecutionContext .flatMap(AwsRequestOverrideConfiguration::credentialsProvider) .orElse(clientCredentials); + long credentialsResolveStart = System.nanoTime(); AwsCredentials credentials = credentialsProvider.resolveCredentials(); + Duration fetchDuration = 
Duration.ofNanos(System.nanoTime() - credentialsResolveStart); + MetricCollector metricCollector = resolveMetricCollector(executionParams); + metricCollector.reportMetric(CoreMetric.CREDENTIALS_FETCH_DURATION, fetchDuration); Validate.validState(credentials != null, "Credential providers must never return null."); @@ -99,6 +106,7 @@ static ExecutionContext .build()) .executionAttributes(executionAttributes) .signer(computeSigner(originalRequest, clientConfig)) + .metricCollector(metricCollector) .build(); } @@ -133,4 +141,12 @@ private static Signer computeSigner(SdkRequest originalRequest, .flatMap(RequestOverrideConfiguration::signer) .orElse(clientConfiguration.option(AwsAdvancedClientOption.SIGNER)); } + + private static MetricCollector resolveMetricCollector(ClientExecutionParams params) { + MetricCollector metricCollector = params.getMetricCollector(); + if (metricCollector == null) { + metricCollector = MetricCollector.create("ApiCall"); + } + return metricCollector; + } } diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java index 4d7b2b2f1f68..0320e3f7dfa2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/DefaultServiceEndpointBuilder.java @@ -17,8 +17,10 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.List; import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.ServiceMetadata; @@ -66,10 +68,23 @@ public URI getServiceEndpoint() { ServiceMetadata serviceMetadata = 
ServiceMetadata.of(serviceName) .reconfigure(c -> c.profileFile(() -> profileFile) .profileName(profileName)); - return withProtocol(serviceMetadata.endpointFor(region)); + URI endpoint = addProtocolToServiceEndpoint(serviceMetadata.endpointFor(region)); + + if (endpoint.getHost() == null) { + String error = "Configured region (" + region + ") resulted in an invalid URI: " + endpoint; + + List exampleRegions = serviceMetadata.regions(); + if (!exampleRegions.isEmpty()) { + error += " Valid region examples: " + exampleRegions; + } + + throw SdkClientException.create(error); + } + + return endpoint; } - private URI withProtocol(URI endpointWithoutProtocol) throws IllegalArgumentException { + private URI addProtocolToServiceEndpoint(URI endpointWithoutProtocol) throws IllegalArgumentException { try { return new URI(protocol + "://" + endpointWithoutProtocol); } catch (URISyntaxException e) { diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java index d801bb9c19e6..d8437707427e 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/eventstream/EventStreamAsyncResponseTransformer.java @@ -17,6 +17,7 @@ import static java.util.Collections.singletonList; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADER; +import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZ_ID_2_HEADER; import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; @@ -49,6 +50,7 @@ import software.amazon.awssdk.http.SdkCancellationException; import software.amazon.awssdk.http.SdkHttpFullResponse; import 
software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.http.SdkHttpUtils; import software.amazon.eventstream.Message; import software.amazon.eventstream.MessageDecoder; @@ -193,9 +195,10 @@ public CompletableFuture prepare() { @Override public void onResponse(SdkResponse response) { if (response != null && response.sdkHttpResponse() != null) { - this.requestId = response.sdkHttpResponse() - .firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER) - .orElse(null); + this.requestId = SdkHttpUtils.firstMatchingHeaderFromCollection(response.sdkHttpResponse().headers(), + X_AMZN_REQUEST_ID_HEADERS) + .orElse(null); + this.extendedRequestId = response.sdkHttpResponse() .firstMatchingHeader(X_AMZ_ID_2_HEADER) .orElse(null); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java index ac8146aef8fc..9f93a307baa2 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/exception/AwsServiceException.java @@ -65,7 +65,8 @@ public String getMessage() { return awsErrorDetails().errorMessage() + " (Service: " + awsErrorDetails().serviceName() + ", Status Code: " + statusCode() + - ", Request ID: " + requestId() + ")"; + ", Request ID: " + requestId() + + ", Extended Request ID: " + extendedRequestId() + ")"; } return super.getMessage(); @@ -164,6 +165,9 @@ public interface Builder extends SdkServiceException.Builder { @Override Builder requestId(String requestId); + @Override + Builder extendedRequestId(String extendedRequestId); + @Override Builder statusCode(int statusCode); @@ -232,6 +236,12 @@ public Builder requestId(String requestId) { return this; } + @Override + public Builder extendedRequestId(String extendedRequestId) { + this.extendedRequestId = extendedRequestId; + return this; + } + @Override 
public Builder statusCode(int statusCode) { this.statusCode = statusCode; diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml new file mode 100644 index 000000000000..bf882585b018 --- /dev/null +++ b/core/metrics-spi/pom.xml @@ -0,0 +1,81 @@ + + + + core + software.amazon.awssdk + 2.13.56-SNAPSHOT + + 4.0.0 + + metrics-spi + AWS Java SDK :: Metrics SPI + This is the base module for SDK metrics feature. It contains the interfaces used for metrics feature + that are used by other modules in the library. + + + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + software.amazon.awssdk + test-utils + ${awsjavasdk.version} + test + + + junit + junit + test + + + com.github.tomakehurst + wiremock + test + + + org.assertj + assertj-core + test + + + org.mockito + mockito-core + test + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.metrics + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java new file mode 100644 index 000000000000..f5a6fce87d50 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/LoggingMetricPublisher.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Logger; + +/** + * An implementation of {@link MetricPublisher} that writes all published metrics to the logs at the INFO level under the + * {@code software.amazon.awssdk.metrics.LoggingMetricPublisher} namespace. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public final class LoggingMetricPublisher implements MetricPublisher { + private static final Logger LOGGER = Logger.loggerFor(LoggingMetricPublisher.class); + + private LoggingMetricPublisher() { + } + + public static LoggingMetricPublisher create() { + return new LoggingMetricPublisher(); + } + + @Override + public void publish(MetricCollection metricCollection) { + LOGGER.info(() -> "Metrics published: " + metricCollection); + } + + @Override + public void close() { + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java new file mode 100644 index 000000000000..aaecb83ae724 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCategory.java @@ -0,0 +1,82 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A enum class representing the different types of metric categories in the SDK. + *

+ * A metric can be tagged with multiple categories. Clients can enable/disable metric collection + * at a {@link MetricCategory} level. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public enum MetricCategory { + /** + * Metrics collected by the core SDK are classified under this category. + */ + CORE("Core"), + + /** + * Metrics collected at the http client level are classified under this category. + */ + HTTP_CLIENT("HttpClient"), + + /** + * Metrics specified by the customer should be classified under this category. + */ + CUSTOM("Custom"), + + /** + * This is an umbrella category (provided for convenience) that records metrics belonging to every category + * defined in this enum. Clients who wish to collect lot of SDK metrics data should use this. + *

+ * Note: Enabling this option along with {@link MetricLevel#TRACE} is verbose and can be expensive based on the platform + * the metrics are uploaded to. Please make sure you need all this data before using this category. + */ + ALL("All"); + + private final String value; + + MetricCategory(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Create a {@link MetricCategory} from the given String value. This method is case insensitive. + * + * @param value the value to create the {@link MetricCategory} from + * @return A {@link MetricCategory} if the given {@link #value} matches one of the enum values. + * Otherwise throws {@link IllegalArgumentException} + */ + public static MetricCategory fromString(String value) { + for (MetricCategory mc : MetricCategory.values()) { + if (mc.value.equalsIgnoreCase(value)) { + return mc; + } + } + + throw new IllegalArgumentException("MetricCategory cannot be created from value: " + value); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java new file mode 100644 index 000000000000..cfd5ba23bf0f --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollection.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics; + +import java.time.Instant; +import java.util.List; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * An immutable collection of metrics. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public interface MetricCollection extends Iterable> { + /** + * @return The name of this metric collection. + */ + String name(); + + /** + * Return a stream of records in this collection. + */ + default Stream> stream() { + return StreamSupport.stream(spliterator(), false); + } + + /** + * Return all the values of the given metric. + * + * @param metric The metric. + * @param The type of the value. + * @return All of the values of this metric. + */ + List metricValues(SdkMetric metric); + + /** + * @return The child metric collections. + */ + List children(); + + /** + * Return all of the {@link #children()} with a specific name. + * + * @param name The name by which we will filter {@link #children()}. + * @return The child metric collections that have the provided name. + */ + default Stream childrenWithName(String name) { + return children().stream().filter(c -> c.name().equals(name)); + } + + /** + * @return The time at which this collection was created. + */ + Instant creationTime(); +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java new file mode 100644 index 000000000000..e22e17039e4c --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricCollector.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.DefaultMetricCollector; + +/** + * Used to collect metrics reported by the SDK. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@NotThreadSafe +@SdkPublicApi +public interface MetricCollector { + /** + * @return The name of this collector. + */ + String name(); + + /** + * Report a metric. + */ + void reportMetric(SdkMetric metric, T data); + + /** + * Create a child of this metric collector. + * + * @param name The name of the child collector. + * @return The child collector. + */ + MetricCollector createChild(String name); + + /** + * Return the collected metrics. + *

+ * Calling {@code collect()} prevents further invocations of {@link #reportMetric(SdkMetric, Object)}. + * @return The collected metrics. + */ + MetricCollection collect(); + + static MetricCollector create(String name) { + return DefaultMetricCollector.create(name); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java new file mode 100644 index 000000000000..0c5bdfcee6bd --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricLevel.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * The {@code MetricLevel} associated with a {@link SdkMetric}, similar to log levels, defines the 'scenario' in which the metric + * is useful. This makes it easy to reduce the cost of metric publishing (e.g. by setting it to {@link #INFO}), and then increase + * it when additional data level is needed for debugging purposes (e.g. by setting it to {@link #TRACE}. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. 
+ */ +@SdkPreviewApi +@SdkPublicApi +public enum MetricLevel { + /** + * The metric level that includes every other metric level, as well as some highly-technical metrics that may only be useful + * in very specific performance or failure scenarios. + */ + TRACE, + + /** + * The "default" metric level that includes metrics that are useful for identifying why errors or performance issues + * are occurring within the SDK. This excludes technical metrics that are only useful in very specific performance or failure + * scenarios. + */ + INFO, + + /** + * Includes metrics that report when API call errors are occurring within the SDK. This does not include all + * of the information that may be generally useful when debugging why errors are occurring (e.g. latency). + */ + ERROR; + + public boolean includesLevel(MetricLevel level) { + return this.compareTo(level) <= 0; + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java new file mode 100644 index 000000000000..0f6e3554292b --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricPublisher.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +/** + * Interface to report and publish the collected SDK metric events to external + * sources. + *

+ * Conceptually, a publisher receives a stream of {@link MetricCollection} objects + * overs its lifetime through its {@link #publish(MetricCollection)} )} method. + * Implementations are then free further aggregate these events into sets of + * metrics that are then published to some external system for further use. + * As long as a publisher is not closed, then it can receive {@code + * MetricCollection} objects at any time. In addition, as the SDK makes use of + * multithreading, it's possible that the publisher is shared concurrently by + * multiple threads, and necessitates that all implementations are threadsafe. + *

+ * The SDK may invoke methods on the interface from multiple threads + * concurrently so implementations must be threadsafe. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@ThreadSafe +@SdkPublicApi +public interface MetricPublisher extends SdkAutoCloseable { + /** + * Notify the publisher of new metric data. After this call returns, the + * caller can safely discard the given {@code metricCollection} instance if it + * no longer needs it. Implementations are strongly encouraged to complete + * any further aggregation and publishing of metrics in an asynchronous manner to + * avoid blocking the calling thread. + *

+ * With the exception of a {@code null} {@code metricCollection}, all + * invocations of this method must return normally. This + * is to ensure that callers of the publisher can safely assume that even + * in situations where an error happens during publishing that it will not + * interrupt the calling thread. + * + * @param metricCollection The collection of metrics. + * @throws IllegalArgumentException If {@code metricCollection} is {@code null}. + */ + void publish(MetricCollection metricCollection); + + /** + * {@inheritDoc} + *

+ * Important: Implementations must block the calling thread until all + * pending metrics are published and any resources acquired have been freed. + */ + @Override + void close(); +} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/SdkFunction.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java similarity index 54% rename from test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/SdkFunction.java rename to core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java index 9b5c3136a961..44ee098da41d 100644 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/SdkFunction.java +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/MetricRecord.java @@ -13,21 +13,26 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.core.waiters; +package software.amazon.awssdk.metrics; -import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; -@SdkProtectedApi -public interface SdkFunction { +/** + * A container associating a metric and its value. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public interface MetricRecord { + /** + * @return The metric. + */ + SdkMetric metric(); /** - * Abstract method that makes a call to the operation - * specified by the waiter by taking the corresponding - * input and returns the corresponding output - * - * @param input Corresponding request for the operation - * @return Corresponding result of the operation + * @return The value of this metric. 
*/ - OutputT apply(InputT input); + T value(); } - diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java new file mode 100644 index 000000000000..09c6dcebb99f --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/NoOpMetricCollector.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.EmptyMetricCollection; + +/** + * A metric collector that doesn't do anything. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. 
+ */ +@SdkPreviewApi +@SdkPublicApi +public final class NoOpMetricCollector implements MetricCollector { + private static final NoOpMetricCollector INSTANCE = new NoOpMetricCollector(); + + private NoOpMetricCollector() { + } + + @Override + public String name() { + return "NoOp"; + } + + @Override + public void reportMetric(SdkMetric metric, T data) { + } + + @Override + public MetricCollector createChild(String name) { + return INSTANCE; + } + + @Override + public MetricCollection collect() { + return EmptyMetricCollection.create(); + } + + public static NoOpMetricCollector create() { + return INSTANCE; + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java new file mode 100644 index 000000000000..61a31ee0eaf3 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/SdkMetric.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics; + +import java.util.Set; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.internal.DefaultSdkMetric; + +/** + * A specific SDK metric. + * + * @param The type for values of this metric. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. 
+ */ +@SdkPreviewApi +@SdkPublicApi +public interface SdkMetric { + + /** + * @return The name of this metric. + */ + String name(); + + /** + * @return The categories of this metric. + */ + Set categories(); + + /** + * @return The level of this metric. + */ + MetricLevel level(); + + /** + * @return The class of the value associated with this metric. + */ + Class valueClass(); + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param c1 A category associated with this metric. + * @param cn Additional categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. + * + * @throws IllegalArgumentException If a metric of the same name has already been created. + */ + static SdkMetric create(String name, Class clzz, MetricLevel level, MetricCategory c1, MetricCategory... cn) { + return DefaultSdkMetric.create(name, clzz, level, c1, cn); + } + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param categories The categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. + * + * @throws IllegalArgumentException If a metric of the same name has already been created. 
+ */ + static SdkMetric create(String name, Class clzz, MetricLevel level, Set categories) { + return DefaultSdkMetric.create(name, clzz, level, categories); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java new file mode 100644 index 000000000000..ace8b0860130 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollection.java @@ -0,0 +1,89 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import static java.util.stream.Collectors.toList; + +import java.time.Instant; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.ToString; + +@SdkInternalApi +public final class DefaultMetricCollection implements MetricCollection { + private final String name; + private final Map, List>> metrics; + private final List children; + private final Instant creationTime; + + public DefaultMetricCollection(String name, Map, + List>> metrics, List children) { + this.name = name; + this.metrics = metrics; + this.children = children != null ? Collections.unmodifiableList(children) : Collections.emptyList(); + this.creationTime = Instant.now(); + } + + @Override + public String name() { + return name; + } + + @SuppressWarnings("unchecked") + @Override + public List metricValues(SdkMetric metric) { + if (metrics.containsKey(metric)) { + List> metricRecords = metrics.get(metric); + List values = metricRecords.stream() + .map(MetricRecord::value) + .collect(toList()); + return (List) Collections.unmodifiableList(values); + } + return Collections.emptyList(); + } + + @Override + public List children() { + return children; + } + + @Override + public Instant creationTime() { + return creationTime; + } + + @Override + public Iterator> iterator() { + return metrics.values().stream() + .flatMap(List::stream) + .iterator(); + } + + @Override + public String toString() { + return ToString.builder("MetricCollection") + .add("name", name) + .add("metrics", metrics.values().stream().flatMap(List::stream).collect(toList())) + .add("children", children) + .build(); + } +} diff --git 
a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java new file mode 100644 index 000000000000..2f4f99adc21f --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollector.java @@ -0,0 +1,86 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +/** + * TODO: Before launch, we should iterate on the performance of this collector, because it's currently very naive. 
+ */ +@SdkInternalApi +public final class DefaultMetricCollector implements MetricCollector { + private static final Logger log = Logger.loggerFor(DefaultMetricCollector.class); + private final String name; + private final Map, List>> metrics = new LinkedHashMap<>(); + private final List children = new ArrayList<>(); + + public DefaultMetricCollector(String name) { + this.name = name; + } + + @Override + public String name() { + return name; + } + + @Override + public synchronized void reportMetric(SdkMetric metric, T data) { + metrics.computeIfAbsent(metric, (m) -> new ArrayList<>()) + .add(new DefaultMetricRecord<>(metric, data)); + } + + @Override + public synchronized MetricCollector createChild(String name) { + MetricCollector child = new DefaultMetricCollector(name); + children.add(child); + return child; + } + + @Override + public synchronized MetricCollection collect() { + List collectedChildren = children.stream() + .map(MetricCollector::collect) + .collect(Collectors.toList()); + + DefaultMetricCollection metricRecords = new DefaultMetricCollection(name, metrics, collectedChildren); + + log.debug(() -> "Collected metrics records: " + metricRecords); + return metricRecords; + } + + public static MetricCollector create(String name) { + Validate.notEmpty(name, "name"); + return new DefaultMetricCollector(name); + } + + @Override + public String toString() { + return ToString.builder("DefaultMetricCollector") + .add("metrics", metrics).build(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java new file mode 100644 index 000000000000..801823c1e9b4 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultMetricRecord.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.ToString; + +@SdkInternalApi +public final class DefaultMetricRecord implements MetricRecord { + private final SdkMetric metric; + private final T value; + + public DefaultMetricRecord(SdkMetric metric, T value) { + this.metric = metric; + this.value = value; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public T value() { + return value; + } + + @Override + public String toString() { + return ToString.builder("MetricRecord") + .add("metric", metric.name()) + .add("value", value) + .build(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java new file mode 100644 index 000000000000..461307f31e70 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetric.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.ToString; +import software.amazon.awssdk.utils.Validate; + +@SdkInternalApi +public final class DefaultSdkMetric extends AttributeMap.Key implements SdkMetric { + private static final ConcurrentHashMap, Boolean> SDK_METRICS = new ConcurrentHashMap<>(); + + private final String name; + private final Class clzz; + private final Set categories; + private final MetricLevel level; + + private DefaultSdkMetric(String name, Class clzz, MetricLevel level, Set categories) { + super(clzz); + this.name = Validate.notBlank(name, "name must not be blank"); + this.clzz = Validate.notNull(clzz, "clzz must not be null"); + this.level = Validate.notNull(level, "level must not be null"); + Validate.notEmpty(categories, "categories must not be empty"); + this.categories = EnumSet.copyOf(categories); + } + + /** + * @return The name of this event. + */ + @Override + public String name() { + return name; + } + + /** + * @return The categories of this event. 
+ */ + @Override + public Set categories() { + return Collections.unmodifiableSet(categories); + } + + @Override + public MetricLevel level() { + return level; + } + + /** + * @return The class of the value associated with this event. + */ + @Override + public Class valueClass() { + return clzz; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + DefaultSdkMetric that = (DefaultSdkMetric) o; + + return name.equals(that.name); + } + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public String toString() { + return ToString.builder("DefaultMetric") + .add("name", name) + .add("categories", categories()) + .build(); + } + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param c1 A category associated with this metric. + * @param cn Additional categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. + * + * @throws IllegalArgumentException If a metric of the same name has already been created. + */ + public static SdkMetric create(String name, Class clzz, MetricLevel level, + MetricCategory c1, MetricCategory... cn) { + Stream categoryStream = Stream.of(c1); + if (cn != null) { + categoryStream = Stream.concat(categoryStream, Stream.of(cn)); + } + Set categories = categoryStream.collect(Collectors.toSet()); + return create(name, clzz, level, categories); + } + + /** + * Create a new metric. + * + * @param name The name of this metric. + * @param clzz The class of the object containing the associated value for this metric. + * @param categories The categories associated with this metric. + * @param The type of the object containing the associated value for this metric. + * @return The created metric. 
+ * + * @throws IllegalArgumentException If a metric of the same name has already been created. + */ + public static SdkMetric create(String name, Class clzz, MetricLevel level, Set categories) { + Validate.noNullElements(categories, "categories must not contain null elements"); + SdkMetric event = new DefaultSdkMetric<>(name, clzz, level, categories); + if (SDK_METRICS.putIfAbsent(event, Boolean.TRUE) != null) { + throw new IllegalArgumentException("Metric with name " + name + " has already been created"); + } + return event; + } + + @SdkTestInternalApi + static void clearDeclaredMetrics() { + SDK_METRICS.clear(); + } + + @SdkTestInternalApi + static Set> declaredEvents() { + return SDK_METRICS.keySet(); + } +} diff --git a/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java new file mode 100644 index 000000000000..e7fc23366d49 --- /dev/null +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/internal/EmptyMetricCollection.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import java.time.Instant; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; + +@SdkInternalApi +public final class EmptyMetricCollection implements MetricCollection { + private final Instant creationTime = Instant.now(); + + @Override + public String name() { + return "NoOp"; + } + + @Override + public List metricValues(SdkMetric metric) { + return Collections.emptyList(); + } + + @Override + public List children() { + return Collections.emptyList(); + } + + @Override + public Instant creationTime() { + return creationTime; + } + + @Override + public Iterator> iterator() { + return Collections.emptyIterator(); + } + + public static EmptyMetricCollection create() { + return new EmptyMetricCollection(); + } +} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterState.java b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/package-info.java similarity index 67% rename from test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterState.java rename to core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/package-info.java index c603db4772eb..4b14e8de0aa5 100644 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterState.java +++ b/core/metrics-spi/src/main/java/software/amazon/awssdk/metrics/package-info.java @@ -13,15 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.core.waiters; +@SdkPreviewApi +package software.amazon.awssdk.metrics; -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public enum WaiterState { - /** - * Three different states a resource can be based - * on the waiter definition - */ - SUCCESS, RETRY, FAILURE -} +import software.amazon.awssdk.annotations.SdkPreviewApi; \ No newline at end of file diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java new file mode 100644 index 000000000000..317538e32b16 --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/MetricLevelTest.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.Test; + +public class MetricLevelTest { + @Test + public void allLevelsAreCorrect() { + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.TRACE)).isTrue(); + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.INFO)).isTrue(); + assertThat(MetricLevel.TRACE.includesLevel(MetricLevel.ERROR)).isTrue(); + } + + @Test + public void infoLevelsAreCorrect() { + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.TRACE)).isFalse(); + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.INFO)).isTrue(); + assertThat(MetricLevel.INFO.includesLevel(MetricLevel.ERROR)).isTrue(); + } + + @Test + public void errorLevelsAreCorrect() { + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.TRACE)).isFalse(); + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.INFO)).isFalse(); + assertThat(MetricLevel.ERROR.includesLevel(MetricLevel.ERROR)).isTrue(); + } +} \ No newline at end of file diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java new file mode 100644 index 000000000000..65d168b4e1a7 --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectionTest.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import org.junit.AfterClass; +import org.junit.Test; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; + +public class DefaultMetricCollectionTest { + private static final SdkMetric M1 = SdkMetric.create("m1", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + @AfterClass + public static void teardown() { + DefaultSdkMetric.clearDeclaredMetrics(); + } + + @Test + public void testMetricValues_noValues_returnsEmptyList() { + DefaultMetricCollection foo = new DefaultMetricCollection("foo", Collections.emptyMap(), Collections.emptyList()); + assertThat(foo.metricValues(M1)).isEmpty(); + } + + @Test + public void testChildren_noChildren_returnsEmptyList() { + DefaultMetricCollection foo = new DefaultMetricCollection("foo", Collections.emptyMap(), Collections.emptyList()); + assertThat(foo.children()).isEmpty(); + } + + @Test + public void testIterator_iteratesOverAllValues() { + Integer[] values = {1, 2, 3}; + Map, List>> recordMap = new HashMap<>(); + List> records = Stream.of(values).map(v -> new DefaultMetricRecord<>(M1, v)).collect(Collectors.toList()); + recordMap.put(M1, records); + + DefaultMetricCollection collection = new DefaultMetricCollection("foo", recordMap, Collections.emptyList()); + final Set iteratorValues = StreamSupport.stream(collection.spliterator(), false) + .map(MetricRecord::value) + .map(Integer.class::cast) + 
.collect(Collectors.toSet()); + + assertThat(iteratorValues).containsExactly(values); + } +} diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java new file mode 100644 index 000000000000..d3f0682d6c8d --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultMetricCollectorTest.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import java.util.stream.Stream; +import org.junit.AfterClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +public class DefaultMetricCollectorTest { + private static final SdkMetric M1 = SdkMetric.create("m1", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @AfterClass + public static void teardown() { + DefaultSdkMetric.clearDeclaredMetrics(); + } + + @Test + public void testName_returnsName() { + MetricCollector collector = MetricCollector.create("collector"); + assertThat(collector.name()).isEqualTo("collector"); + } + + @Test + public void testCreateChild_returnsChildWithCorrectName() { + MetricCollector parent = MetricCollector.create("parent"); + MetricCollector child = parent.createChild("child"); + + assertThat(child.name()).isEqualTo("child"); + } + + @Test + public void testCollect_allReportedMetricsInCollection() { + MetricCollector collector = MetricCollector.create("collector"); + Integer[] values = {1, 2, 3}; + Stream.of(values).forEach(v -> collector.reportMetric(M1, v)); + MetricCollection collect = collector.collect(); + assertThat(collect.metricValues(M1)).containsExactly(values); + } + + @Test + public void testCollect_returnedCollectionContainsAllChildren() { + MetricCollector parent = MetricCollector.create("parent"); + String[] childNames = {"c1", "c2", "c3" }; + Stream.of(childNames).forEach(parent::createChild); + MetricCollection collected = parent.collect(); + 
assertThat(collected.children().stream().map(MetricCollection::name)).containsExactly(childNames); + } +} diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java new file mode 100644 index 000000000000..a6a2fbbc18d6 --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricRecordTest.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Test; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.MetricRecord; + +/** + * Tests for {@link DefaultMetricRecord}. 
+ */ +public class DefaultSdkMetricRecordTest { + @Test + public void testGetters() { + SdkMetric event = SdkMetric.create("foo", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + MetricRecord record = new DefaultMetricRecord<>(event, 2); + + assertThat(record.metric()).isEqualTo(event); + assertThat(record.value()).isEqualTo(2); + } +} diff --git a/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java new file mode 100644 index 000000000000..1fe8d4fbea1a --- /dev/null +++ b/core/metrics-spi/src/test/java/software/amazon/awssdk/metrics/internal/DefaultSdkMetricTest.java @@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.internal; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +public class DefaultSdkMetricTest { + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void methodSetup() { + DefaultSdkMetric.clearDeclaredMetrics(); + } + + @Test + public void testOf_variadicOverload_createdProperly() { + SdkMetric event = SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE); + + assertThat(event.categories()).containsExactly(MetricCategory.CORE); + assertThat(event.name()).isEqualTo("event"); + assertThat(event.valueClass()).isEqualTo(Integer.class); + } + + @Test + public void testOf_setOverload_createdProperly() { + SdkMetric event = SdkMetric.create("event", Integer.class, MetricLevel.INFO, Stream.of(MetricCategory.CORE) + .collect(Collectors.toSet())); + + assertThat(event.categories()).containsExactly(MetricCategory.CORE); + assertThat(event.name()).isEqualTo("event"); + assertThat(event.valueClass()).isEqualTo(Integer.class); + } + + @Test + public void testOf_variadicOverload_c1Null_throws() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, (MetricCategory) null); + } + + @Test + public void testOf_variadicOverload_c1NotNull_cnNull_doesNotThrow() { + SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE, null); + } + + @Test + public void testOf_variadicOverload_cnContainsNull_throws() { + thrown.expect(IllegalArgumentException.class); + 
thrown.expectMessage("must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, MetricCategory.CORE, new MetricCategory[]{null }); + } + + @Test + public void testOf_setOverload_null_throws() { + thrown.expect(NullPointerException.class); + thrown.expectMessage("object is null"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, (Set) null); + } + + @Test + public void testOf_setOverload_nullElement_throws() { + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("categories must not contain null elements"); + SdkMetric.create("event", Integer.class, MetricLevel.INFO, Stream.of((MetricCategory) null).collect(Collectors.toSet())); + } + + @Test + public void testOf_namePreviouslyUsed_throws() { + String fooName = "metricEvent"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + } + + @Test + public void testOf_namePreviouslyUsed_differentArgs_throws() { + String fooName = "metricEvent"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + SdkMetric.create(fooName, Long.class, MetricLevel.INFO, MetricCategory.HTTP_CLIENT); + } + + @Test + public void testOf_namePreviouslyUsed_doesNotReplaceExisting() { + String fooName = "fooMetric"; + + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage(fooName + " has already been created"); + + SdkMetric.create(fooName, Integer.class, MetricLevel.INFO, MetricCategory.CORE); + try { + SdkMetric.create(fooName, Long.class, MetricLevel.INFO, MetricCategory.HTTP_CLIENT); + } finally { + SdkMetric fooMetric = DefaultSdkMetric.declaredEvents() + .stream() + .filter(e -> 
e.name().equals(fooName)) + .findFirst() + .get(); + + assertThat(fooMetric.name()).isEqualTo(fooName); + assertThat(fooMetric.valueClass()).isEqualTo(Integer.class); + assertThat(fooMetric.categories()).containsExactly(MetricCategory.CORE); + } + } +} diff --git a/core/pom.xml b/core/pom.xml index 4811b7f95cfe..a70fd0ef59de 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT core @@ -41,6 +41,7 @@ profiles regions protocols + metrics-spi diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 806ebeabfb2a..2d99abe7c553 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT profiles diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java index f111df7b2413..c62ca7a15ce7 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileReader.java @@ -42,7 +42,7 @@ public final class ProfileFileReader { private static final Pattern EMPTY_LINE = Pattern.compile("^[\t ]*$"); - private static final Pattern VALID_IDENTIFIER = Pattern.compile("^[A-Za-z0-9_\\-/.%@]*$"); + private static final Pattern VALID_IDENTIFIER = Pattern.compile("^[A-Za-z0-9_\\-/.%@:]*$"); private ProfileFileReader() { } @@ -214,7 +214,7 @@ private static Optional parseProfileDefinition(ParserState state, String // If the profile name includes invalid characters, it should be ignored. if (!isValidIdentifier(profileName)) { log.warn(() -> "Ignoring profile '" + standardizedProfileName + "' on line " + state.currentLineNumber + " because " + - "it was not alphanumeric with only these special characters: - / . 
% @ _"); + "it was not alphanumeric with only these special characters: - / . % @ _ :"); return Optional.empty(); } @@ -257,7 +257,7 @@ private static Optional> parsePropertyDefinition(ParserStat // If the profile name includes invalid characters, it should be ignored. if (!isValidIdentifier(propertyKey)) { log.warn(() -> "Ignoring property '" + propertyKey + "' on line " + state.currentLineNumber + " because " + - "its name was not alphanumeric with only these special characters: - / . % @ _"); + "its name was not alphanumeric with only these special characters: - / . % @ _ :"); return Optional.empty(); } diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java index d39126d60206..82377ae6c6aa 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java @@ -326,14 +326,14 @@ public void invalidPropertyNamesAreIgnored() { @Test public void allValidProfileNameCharactersAreSupported() { - assertThat(configFileProfiles("[profile ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@]")) - .isEqualTo(profiles(profile("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@"))); + assertThat(configFileProfiles("[profile ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:]")) + .isEqualTo(profiles(profile("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:"))); } @Test public void allValidPropertyNameCharactersAreSupported() { - assertThat(configFileProfiles("[profile foo]\nABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@ = value")) - .isEqualTo(profiles(profile("foo", property("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@", + assertThat(configFileProfiles("[profile 
foo]\nABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@: = value")) + .isEqualTo(profiles(profile("foo", property("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_./%@:", "value")))); } diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index d6096b0e6756..75bccb898e49 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-ion-protocol/pom.xml b/core/protocols/aws-ion-protocol/pom.xml index cef394431407..61052c898c10 100644 --- a/core/protocols/aws-ion-protocol/pom.xml +++ b/core/protocols/aws-ion-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index d2491577409d..8cfd51988274 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java index b85f58246e43..1fd0a6b05670 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/BaseAwsJsonProtocolFactory.java @@ -31,6 +31,8 @@ import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import 
software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.MarshallLocation; import software.amazon.awssdk.core.traits.TimestampFormatTrait; import software.amazon.awssdk.http.SdkHttpFullRequest; @@ -102,11 +104,12 @@ public final HttpResponseHandler createResponseHandler(Js public final HttpResponseHandler createResponseHandler( JsonOperationMetadata operationMetadata, Function pojoSupplier) { - return new AwsJsonResponseHandler<>( - new JsonResponseHandler<>(protocolUnmarshaller, - pojoSupplier, - operationMetadata.hasStreamingSuccessResponse(), - operationMetadata.isPayloadJson())); + return timeUnmarshalling( + new AwsJsonResponseHandler<>( + new JsonResponseHandler<>(protocolUnmarshaller, + pojoSupplier, + operationMetadata.hasStreamingSuccessResponse(), + operationMetadata.isPayloadJson()))); } /** @@ -114,7 +117,7 @@ public final HttpResponseHandler createResponseHandler( */ public final HttpResponseHandler createErrorResponseHandler( JsonOperationMetadata errorResponseMetadata) { - return AwsJsonProtocolErrorUnmarshaller + return timeUnmarshalling(AwsJsonProtocolErrorUnmarshaller .builder() .jsonProtocolUnmarshaller(protocolUnmarshaller) .exceptions(modeledExceptions) @@ -122,7 +125,11 @@ public final HttpResponseHandler createErrorResponseHandler .errorMessageParser(AwsJsonErrorMessageParser.DEFAULT_ERROR_MESSAGE_PARSER) .jsonFactory(getSdkFactory().getJsonFactory()) .defaultExceptionSupplier(defaultServiceExceptionSupplier) - .build(); + .build()); + } + + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); } private StructuredJsonGenerator createGenerator(OperationInfo operationInfo) { diff --git 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java index ac1de74e5c37..ea832b762e8e 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonProtocolErrorUnmarshaller.java @@ -85,6 +85,7 @@ private AwsServiceException unmarshall(SdkHttpFullResponse response, ExecutionAt exception.message(errorMessage); exception.statusCode(statusCode(response, modeledExceptionMetadata)); exception.requestId(getRequestIdFromHeaders(response.headers())); + exception.extendedRequestId(getExtendedRequestIdFromHeaders(response.headers())); return exception.build(); } @@ -133,7 +134,11 @@ private AwsErrorDetails extractAwsErrorDetails(SdkHttpFullResponse response, } private String getRequestIdFromHeaders(Map> headers) { - return SdkHttpUtils.firstMatchingHeader(headers, X_AMZN_REQUEST_ID_HEADER).orElse(null); + return SdkHttpUtils.firstMatchingHeaderFromCollection(headers, X_AMZN_REQUEST_ID_HEADERS).orElse(null); + } + + private String getExtendedRequestIdFromHeaders(Map> headers) { + return SdkHttpUtils.firstMatchingHeader(headers, X_AMZ_ID_2_HEADER).orElse(null); } public static Builder builder() { diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java index 0c20eafc4d2f..7569bf3f8e36 100644 --- 
a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/AwsJsonResponseHandler.java @@ -27,6 +27,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.utils.http.SdkHttpUtils; @SdkInternalApi public final class AwsJsonResponseHandler implements HttpResponseHandler { @@ -57,7 +58,9 @@ public T handle(SdkHttpFullResponse response, ExecutionAttributes executionAttri private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response) { Map metadata = new HashMap<>(); - metadata.put(AWS_REQUEST_ID, response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + metadata.put(AWS_REQUEST_ID, SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), + X_AMZN_REQUEST_ID_HEADERS) + .orElse(null)); response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); return DefaultAwsResponseMetadata.create(metadata); diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java index fbf0e001069f..4af3e85d0717 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonResponseHandler.java @@ -74,6 +74,9 @@ public T handle(SdkHttpFullResponse response, ExecutionAttributes executionAttri response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER) .orElse("not available")); + 
SdkStandardLogger.REQUEST_ID_LOGGER.debug(() -> X_AMZ_ID_2_HEADER + " : " + + response.firstMatchingHeader(X_AMZ_ID_2_HEADER) + .orElse("not available")); try { T result = unmarshaller.unmarshall(pojoSupplier.apply(response), response); diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index d57ad57b0fd1..a4fa67f28eda 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java index ae2891123282..e7e791f555a9 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/AwsQueryProtocolFactory.java @@ -28,6 +28,8 @@ import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.core.OperationInfo; @@ -47,20 +49,20 @@ public class AwsQueryProtocolFactory { private final SdkClientConfiguration clientConfiguration; private final List modeledExceptions; private final Supplier defaultServiceExceptionSupplier; - private final AwsXmlErrorProtocolUnmarshaller errorUnmarshaller; + private final MetricCollectingHttpResponseHandler errorUnmarshaller; 
AwsQueryProtocolFactory(Builder builder) { this.clientConfiguration = builder.clientConfiguration; this.modeledExceptions = unmodifiableList(builder.modeledExceptions); this.defaultServiceExceptionSupplier = builder.defaultServiceExceptionSupplier; - this.errorUnmarshaller = AwsXmlErrorProtocolUnmarshaller + this.errorUnmarshaller = timeUnmarshalling(AwsXmlErrorProtocolUnmarshaller .builder() .defaultExceptionSupplier(defaultServiceExceptionSupplier) .exceptions(modeledExceptions) // We don't set result wrapper since that's handled by the errorRootExtractor .errorUnmarshaller(QueryProtocolUnmarshaller.builder().build()) .errorRootExtractor(this::getErrorRoot) - .build(); + .build()); } /** @@ -86,10 +88,9 @@ public final ProtocolMarshaller createProtocolMarshaller( * @return New {@link HttpResponseHandler} for success responses. */ public final HttpResponseHandler createResponseHandler(Supplier pojoSupplier) { - return new AwsQueryResponseHandler<>(QueryProtocolUnmarshaller.builder() - .hasResultWrapper(!isEc2()) - .build(), - r -> pojoSupplier.get()); + return timeUnmarshalling(new AwsQueryResponseHandler<>(QueryProtocolUnmarshaller.builder() + .hasResultWrapper(!isEc2()) + .build(), r -> pojoSupplier.get())); } /** @@ -100,6 +101,10 @@ public final HttpResponseHandler createErrorResponseHandler return errorUnmarshaller; } + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); + } + /** * Extracts the element from the root XML document. Method is protected as EC2 has a slightly * different location. 
diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java index 1df64688999b..bf0c23ad6fb9 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java +++ b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsQueryResponseHandler.java @@ -32,6 +32,7 @@ import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Pair; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * Response handler for AWS/Query services and Amazon EC2 which is a dialect of the Query protocol. @@ -84,7 +85,8 @@ private T unmarshallResponse(SdkHttpFullResponse response) throws Exception { private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response, Map metadata) { if (!metadata.containsKey(AWS_REQUEST_ID)) { metadata.put(AWS_REQUEST_ID, - response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), X_AMZN_REQUEST_ID_HEADERS) + .orElse(null)); } response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); diff --git a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java index 1ff4f6f44ce7..b94d2e3ca48c 100644 --- a/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java +++ 
b/core/protocols/aws-query-protocol/src/main/java/software/amazon/awssdk/protocols/query/internal/unmarshall/AwsXmlErrorUnmarshaller.java @@ -40,6 +40,7 @@ @SdkInternalApi public final class AwsXmlErrorUnmarshaller { private static final String X_AMZN_REQUEST_ID_HEADER = "x-amzn-RequestId"; + private static final String X_AMZ_ID_2_HEADER = "x-amz-id-2"; private final List exceptions; private final Supplier defaultExceptionSupplier; @@ -90,6 +91,7 @@ public AwsServiceException unmarshall(XmlElement documentRoot, .build(); builder.requestId(getRequestId(response, documentRoot)) + .extendedRequestId(getExtendedRequestId(response)) .statusCode(response.statusCode()) .clockSkew(getClockSkew(executionAttributes)) .awsErrorDetails(awsErrorDetails); @@ -176,6 +178,16 @@ private String getRequestId(SdkHttpFullResponse response, XmlElement document) { response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null); } + /** + * Extracts the extended request ID from the response headers. + * + * @param response The HTTP response object. + * @return Extended Request ID string or null if not present. + */ + private String getExtendedRequestId(SdkHttpFullResponse response) { + return response.firstMatchingHeader(X_AMZ_ID_2_HEADER).orElse(null); + } + /** * Builder for {@link AwsXmlErrorUnmarshaller}. 
*/ diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index e4e38bf1dd6f..1c786dc34f9b 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java index c771e82f7193..296ba2483e77 100644 --- a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/AwsXmlProtocolFactory.java @@ -30,7 +30,9 @@ import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.http.MetricCollectingHttpResponseHandler; import software.amazon.awssdk.core.internal.http.CombinedResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.core.OperationInfo; @@ -72,20 +74,21 @@ public class AwsXmlProtocolFactory { private final List modeledExceptions; private final Supplier defaultServiceExceptionSupplier; - private final AwsXmlErrorProtocolUnmarshaller errorUnmarshaller; + private final HttpResponseHandler errorUnmarshaller; private final SdkClientConfiguration clientConfiguration; AwsXmlProtocolFactory(Builder builder) { this.modeledExceptions = unmodifiableList(builder.modeledExceptions); this.defaultServiceExceptionSupplier = builder.defaultServiceExceptionSupplier; this.clientConfiguration = 
builder.clientConfiguration; - this.errorUnmarshaller = AwsXmlErrorProtocolUnmarshaller - .builder() - .defaultExceptionSupplier(defaultServiceExceptionSupplier) - .exceptions(modeledExceptions) - .errorUnmarshaller(XML_PROTOCOL_UNMARSHALLER) - .errorRootExtractor(this::getErrorRoot) - .build(); + + this.errorUnmarshaller = timeUnmarshalling( + AwsXmlErrorProtocolUnmarshaller.builder() + .defaultExceptionSupplier(defaultServiceExceptionSupplier) + .exceptions(modeledExceptions) + .errorUnmarshaller(XML_PROTOCOL_UNMARSHALLER) + .errorRootExtractor(this::getErrorRoot) + .build()); } /** @@ -103,9 +106,8 @@ public ProtocolMarshaller createProtocolMarshaller(Operation public HttpResponseHandler createResponseHandler(Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { - return new AwsXmlResponseHandler<>( - XML_PROTOCOL_UNMARSHALLER, r -> pojoSupplier.get(), - staxOperationMetadata.isHasStreamingSuccessResponse()); + return timeUnmarshalling(new AwsXmlResponseHandler<>(XML_PROTOCOL_UNMARSHALLER, r -> pojoSupplier.get(), + staxOperationMetadata.isHasStreamingSuccessResponse())); } protected Function createResponseTransformer( @@ -127,6 +129,10 @@ public HttpResponseHandler createErrorResponseHandler() { return errorUnmarshaller; } + private MetricCollectingHttpResponseHandler timeUnmarshalling(HttpResponseHandler delegate) { + return MetricCollectingHttpResponseHandler.create(CoreMetric.UNMARSHALLING_DURATION, delegate); + } + public HttpResponseHandler> createCombinedResponseHandler( Supplier pojoSupplier, XmlOperationMetadata staxOperationMetadata) { diff --git a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java index 71a8b7ae57bd..0a81b613928a 100644 --- 
a/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java +++ b/core/protocols/aws-xml-protocol/src/main/java/software/amazon/awssdk/protocols/xml/internal/unmarshall/AwsXmlResponseHandler.java @@ -32,6 +32,7 @@ import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.http.SdkHttpUtils; /** * Response handler for REST-XML services (Cloudfront, Route53, and S3). @@ -92,7 +93,7 @@ private T unmarshallResponse(SdkHttpFullResponse response) throws Exception { private AwsResponseMetadata generateResponseMetadata(SdkHttpResponse response) { Map metadata = new HashMap<>(); metadata.put(AWS_REQUEST_ID, - response.firstMatchingHeader(X_AMZN_REQUEST_ID_HEADER).orElse(null)); + SdkHttpUtils.firstMatchingHeaderFromCollection(response.headers(), X_AMZN_REQUEST_ID_HEADERS).orElse(null)); response.headers().forEach((key, value) -> metadata.put(key, value.get(0))); return DefaultAwsResponseMetadata.create(metadata); diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 9cec8d3a84d0..b4ebb013a48a 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index db7b18df00aa..9d27fe4e88a6 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index 5568879730cc..67bbb6ccf50d 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT regions diff --git 
a/core/regions/src/main/java/software/amazon/awssdk/regions/servicemetadata/EnhancedS3ServiceMetadata.java b/core/regions/src/main/java/software/amazon/awssdk/regions/servicemetadata/EnhancedS3ServiceMetadata.java index f0c93a9c5ed3..a3467944d5ee 100644 --- a/core/regions/src/main/java/software/amazon/awssdk/regions/servicemetadata/EnhancedS3ServiceMetadata.java +++ b/core/regions/src/main/java/software/amazon/awssdk/regions/servicemetadata/EnhancedS3ServiceMetadata.java @@ -100,12 +100,16 @@ private static String envVarSetting() { return SdkSystemSetting.AWS_S3_US_EAST_1_REGIONAL_ENDPOINT.getStringValue().orElse(null); } - private String profileFileSetting(Supplier profileFileSupplier, Supplier profileName) { + private String profileFileSetting(Supplier profileFileSupplier, Supplier profileNameSupplier) { try { - return profileFileSupplier.get() - .profile(profileName.get()) - .flatMap(p -> p.property(ProfileProperty.S3_US_EAST_1_REGIONAL_ENDPOINT)) - .orElse(null); + ProfileFile profileFile = profileFileSupplier.get(); + String profileName = profileNameSupplier.get(); + if (profileFile == null || profileName == null) { + return null; + } + return profileFile.profile(profileName) + .flatMap(p -> p.property(ProfileProperty.S3_US_EAST_1_REGIONAL_ENDPOINT)) + .orElse(null); } catch (Exception t) { log.warn(() -> "Unable to load config file", t); return null; diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index c05987f7da3f..d4db2da47a75 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -8,8 +8,11 @@ "dnsSuffix" : "amazonaws.com", "partition" : "aws", "partitionName" : "AWS Standard", - "regionRegex" : "^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$", + "regionRegex" : 
"^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, @@ -32,19 +35,22 @@ "description" : "Canada (Central)" }, "eu-central-1" : { - "description" : "EU (Frankfurt)" + "description" : "Europe (Frankfurt)" }, "eu-north-1" : { - "description" : "EU (Stockholm)" + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" }, "eu-west-1" : { - "description" : "EU (Ireland)" + "description" : "Europe (Ireland)" }, "eu-west-2" : { - "description" : "EU (London)" + "description" : "Europe (London)" }, "eu-west-3" : { - "description" : "EU (Paris)" + "description" : "Europe (Paris)" }, "me-south-1" : { "description" : "Middle East (Bahrain)" @@ -73,6 +79,7 @@ }, "access-analyzer" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -82,6 +89,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -95,6 +103,7 @@ }, "acm" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -110,6 +119,7 @@ }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -200,8 +210,37 @@ "us-west-2" : { } } }, + "api.detective" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "api.ecr" : { "endpoints" : { + "af-south-1" : { + "credentialScope" : { + "region" : 
"af-south-1" + }, + "hostname" : "api.ecr.af-south-1.amazonaws.com" + }, "ap-east-1" : { "credentialScope" : { "region" : "ap-east-1" @@ -256,6 +295,12 @@ }, "hostname" : "api.ecr.eu-north-1.amazonaws.com" }, + "eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "api.ecr.eu-south-1.amazonaws.com" + }, "eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" @@ -336,6 +381,28 @@ } } }, + "api.elastic-inference" : { + "endpoints" : { + "ap-northeast-1" : { + "hostname" : "api.elastic-inference.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "hostname" : "api.elastic-inference.ap-northeast-2.amazonaws.com" + }, + "eu-west-1" : { + "hostname" : "api.elastic-inference.eu-west-1.amazonaws.com" + }, + "us-east-1" : { + "hostname" : "api.elastic-inference.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "hostname" : "api.elastic-inference.us-east-2.amazonaws.com" + }, + "us-west-2" : { + "hostname" : "api.elastic-inference.us-west-2.amazonaws.com" + } + } + }, "api.mediatailor" : { "endpoints" : { "ap-northeast-1" : { }, @@ -406,6 +473,7 @@ }, "apigateway" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -415,6 +483,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -431,6 +500,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -440,6 +510,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -483,6 +554,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -504,11 +576,16 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : 
{ }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -539,6 +616,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -548,6 +626,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -564,6 +643,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -571,8 +651,12 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -615,35 +699,29 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, - "fips-ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "hostname" : "batch-fips.ca-central-1.amazonaws.com" - }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" }, - "hostname" : "batch-fips.us-east-1.amazonaws.com" + "hostname" : "fips.batch.us-east-1.amazonaws.com" }, "fips-us-east-2" : { "credentialScope" : { "region" : "us-east-2" }, - "hostname" : "batch-fips.us-east-2.amazonaws.com" + "hostname" : "fips.batch.us-east-2.amazonaws.com" }, "fips-us-west-1" : { "credentialScope" : { "region" : "us-west-1" }, - "hostname" : "batch-fips.us-west-1.amazonaws.com" + "hostname" : "fips.batch.us-west-1.amazonaws.com" }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" }, - "hostname" : "batch-fips.us-west-2.amazonaws.com" + "hostname" : "fips.batch.us-west-2.amazonaws.com" }, "me-south-1" : { }, "sa-east-1" : { }, @@ -696,6 +774,7 @@ }, "cloud9" : { "endpoints" : { + "ap-east-1" 
: { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -706,8 +785,12 @@ "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -726,6 +809,7 @@ }, "cloudformation" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -735,6 +819,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -840,6 +925,7 @@ }, "cloudtrail" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -849,6 +935,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -884,6 +971,20 @@ "us-west-2" : { } } }, + "codeartifact" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "codebuild" : { "endpoints" : { "ap-east-1" : { }, @@ -960,6 +1061,7 @@ }, "codedeploy" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -969,6 +1071,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1017,6 +1120,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "codepipeline-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "codepipeline-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + 
"credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "codepipeline-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "codepipeline-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "codepipeline-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1050,6 +1183,7 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1186,6 +1320,24 @@ "ca-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "comprehendmedical-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "comprehendmedical-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "comprehendmedical-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -1265,6 +1417,7 @@ "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -1295,6 +1448,7 @@ }, "datasync" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1304,6 +1458,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1369,6 +1524,7 @@ }, "directconnect" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1378,9 +1534,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + 
"credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "directconnect-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "directconnect-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "directconnect-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "directconnect-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1391,13 +1572,16 @@ }, "discovery" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "us-east-1" : { }, "us-west-2" : { } } }, "dms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1413,6 +1597,7 @@ }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1520,6 +1705,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ds-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ds-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ds-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ds-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "ds-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1533,6 +1748,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { 
}, @@ -1548,6 +1764,7 @@ }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1595,6 +1812,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1604,9 +1822,40 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ec2-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ec2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ec2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ec2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "ec2-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1617,6 +1866,7 @@ }, "ecs" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1626,9 +1876,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ecs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ecs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ecs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : 
"ecs-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1637,29 +1912,10 @@ "us-west-2" : { } } }, - "elastic-inference" : { - "endpoints" : { - "ap-northeast-1" : { - "hostname" : "api.elastic-inference.ap-northeast-1.amazonaws.com" - }, - "ap-northeast-2" : { - "hostname" : "api.elastic-inference.ap-northeast-2.amazonaws.com" - }, - "eu-west-1" : { - "hostname" : "api.elastic-inference.eu-west-1.amazonaws.com" - }, - "us-east-1" : { - "hostname" : "api.elastic-inference.us-east-1.amazonaws.com" - }, - "us-east-2" : { - "hostname" : "api.elastic-inference.us-east-2.amazonaws.com" - }, - "us-west-2" : { - "hostname" : "api.elastic-inference.us-west-2.amazonaws.com" - } - } - }, - "elasticache" : { + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, @@ -1673,22 +1929,64 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, - "fips" : { + "fips-us-east-1" : { "credentialScope" : { - "region" : "us-west-1" + "region" : "us-east-1" }, - "hostname" : "elasticache-fips.us-west-1.amazonaws.com" + "hostname" : "fips.eks.us-east-1.amazonaws.com" }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "fips.eks.us-east-2.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "fips.eks.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, + "elasticache" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + 
"eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "elasticache-fips.us-west-1.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } }, "elasticbeanstalk" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1698,6 +1996,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1735,6 +2034,7 @@ }, "elasticfilesystem" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1744,9 +2044,130 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "hostname" : "elasticfilesystem-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "elasticfilesystem-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "elasticfilesystem-fips.ap-northeast-1.amazonaws.com" + }, + "fips-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "elasticfilesystem-fips.ap-northeast-2.amazonaws.com" + }, + "fips-ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "elasticfilesystem-fips.ap-south-1.amazonaws.com" + }, + "fips-ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "elasticfilesystem-fips.ap-southeast-1.amazonaws.com" + }, + "fips-ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : 
"elasticfilesystem-fips.ap-southeast-2.amazonaws.com" + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com" + }, + "fips-eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com" + }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "elasticfilesystem-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "hostname" : "elasticfilesystem-fips.eu-south-1.amazonaws.com" + }, + "fips-eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "elasticfilesystem-fips.eu-west-1.amazonaws.com" + }, + "fips-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "elasticfilesystem-fips.eu-west-2.amazonaws.com" + }, + "fips-eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "elasticfilesystem-fips.eu-west-3.amazonaws.com" + }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" + }, + "fips-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "elasticfilesystem-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "elasticfilesystem-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "elasticfilesystem-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "elasticfilesystem-fips.us-west-2.amazonaws.com" + }, "me-south-1" : 
{ }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1760,6 +2181,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1769,6 +2191,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1810,6 +2233,7 @@ "sslCommonName" : "{region}.{service}.{dnsSuffix}" }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1821,6 +2245,7 @@ "sslCommonName" : "{service}.{region}.{dnsSuffix}" }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1898,6 +2323,7 @@ }, "es" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1907,6 +2333,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1926,6 +2353,7 @@ }, "events" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -1935,6 +2363,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2021,6 +2450,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -2122,6 +2552,7 @@ }, "hostname" : "fms-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2161,8 +2592,11 @@ "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -2196,6 +2630,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { 
}, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2205,6 +2640,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2260,6 +2696,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "glue-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "glue-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "glue-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "glue-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2289,7 +2749,9 @@ }, "groundstation" : { "endpoints" : { + "ap-southeast-2" : { }, "eu-north-1" : { }, + "eu-west-1" : { }, "me-south-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -2350,6 +2812,11 @@ "us-east-1" : { } } }, + "honeycode" : { + "endpoints" : { + "us-west-2" : { } + } + }, "iam" : { "endpoints" : { "aws-global" : { @@ -2598,6 +3065,7 @@ }, "kinesis" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2607,6 +3075,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2684,6 +3153,7 @@ }, "kms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2693,6 +3163,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2713,8 +3184,11 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, 
"eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -2723,6 +3197,7 @@ }, "lambda" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2732,9 +3207,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "lambda-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "lambda-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "lambda-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "lambda-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2745,6 +3245,7 @@ }, "license-manager" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2754,9 +3255,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "license-manager-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "license-manager-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "license-manager-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "license-manager-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2784,6 +3310,7 @@ 
}, "logs" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2793,9 +3320,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "logs-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "logs-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "logs-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "logs-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -2810,21 +3362,84 @@ "us-east-1" : { } } }, - "managedblockchain" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-southeast-1" : { }, - "eu-west-1" : { }, - "us-east-1" : { } - } - }, - "marketplacecommerceanalytics" : { + "macie" : { "endpoints" : { - "us-east-1" : { } + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "macie-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "macie-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { }, + "us-west-2" : { } } }, - "mediaconnect" : { + "macie2" : { + "endpoints" : { + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "macie2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + 
"credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "macie2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "macie2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "macie2-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "managedblockchain" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { } + } + }, + "marketplacecommerceanalytics" : { + "endpoints" : { + "us-east-1" : { } + } + }, + "mediaconnect" : { "endpoints" : { "ap-east-1" : { }, "ap-northeast-1" : { }, @@ -2853,9 +3468,40 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "mediaconvert-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "mediaconvert-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "mediaconvert-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2889,6 +3535,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2906,6 +3553,7 @@ "eu-central-1" : { }, "eu-north-1" : 
{ }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -2917,6 +3565,7 @@ } }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2926,6 +3575,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -2939,8 +3589,10 @@ }, "mgh" : { "endpoints" : { + "ap-northeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, + "us-east-1" : { }, "us-west-2" : { } } }, @@ -2956,8 +3608,12 @@ } }, "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -2967,6 +3623,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -2976,9 +3633,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "monitoring-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "monitoring-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "monitoring-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "monitoring-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -3128,6 +3810,12 @@ }, "hostname" : "rds.us-east-2.amazonaws.com" }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "rds.us-west-1.amazonaws.com" + }, "us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -3220,30 +3908,6 @@ 
"ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "hostname" : "opsworks-cm-fips.us-east-1.amazonaws.com" - }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "hostname" : "opsworks-cm-fips.us-east-2.amazonaws.com" - }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "hostname" : "opsworks-cm-fips.us-west-1.amazonaws.com" - }, - "fips-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "hostname" : "opsworks-cm-fips.us-west-2.amazonaws.com" - }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -3257,6 +3921,12 @@ "region" : "us-east-1" }, "hostname" : "organizations.us-east-1.amazonaws.com" + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "organizations-fips.us-east-1.amazonaws.com" } }, "isRegionalized" : false, @@ -3264,9 +3934,11 @@ }, "outposts" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -3275,7 +3947,38 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "outposts-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "outposts-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "outposts-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "outposts-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "outposts-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, + 
"sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -3449,6 +4152,7 @@ }, "ram" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3458,6 +4162,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3471,6 +4176,7 @@ }, "rds" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3480,10 +4186,41 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, "me-south-1" : { }, + "rds-fips.ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "rds-fips.ca-central-1.amazonaws.com" + }, + "rds-fips.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "rds-fips.us-east-1.amazonaws.com" + }, + "rds-fips.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rds-fips.us-east-2.amazonaws.com" + }, + "rds-fips.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "rds-fips.us-west-1.amazonaws.com" + }, + "rds-fips.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rds-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "{service}.{dnsSuffix}" @@ -3495,6 +4232,7 @@ }, "redshift" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3504,6 +4242,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3555,6 +4294,30 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "rekognition-fips.us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : 
"rekognition-fips.us-east-1.amazonaws.com" + }, + "rekognition-fips.us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "rekognition-fips.us-east-2.amazonaws.com" + }, + "rekognition-fips.us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "rekognition-fips.us-west-1.amazonaws.com" + }, + "rekognition-fips.us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "rekognition-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -3563,6 +4326,7 @@ }, "resource-groups" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3572,6 +4336,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3640,6 +4405,7 @@ "protocols" : [ "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3649,6 +4415,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -3667,8 +4434,12 @@ } }, "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -3725,6 +4496,7 @@ "signatureVersions" : [ "s3v4" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { "hostname" : "s3.ap-northeast-1.amazonaws.com", @@ -3750,6 +4522,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { "hostname" : "s3.eu-west-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] @@ -3947,10 +4720,22 @@ }, "schemas" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + 
"ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -3974,6 +4759,7 @@ }, "secretsmanager" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -3983,6 +4769,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4020,6 +4807,7 @@ }, "securityhub" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4029,9 +4817,34 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "securityhub-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "securityhub-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "securityhub-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "securityhub-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -4103,6 +4916,7 @@ }, "servicecatalog" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4112,6 +4926,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4188,12 +5003,25 @@ "sslCommonName" : "shield.us-east-1.amazonaws.com" }, "endpoints" : { - "us-east-1" : { } + "aws-global" : { + "credentialScope" : { + "region" : 
"us-east-1" + }, + "hostname" : "shield.us-east-1.amazonaws.com" + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "shield-fips.us-east-1.amazonaws.com" + } }, - "isRegionalized" : false + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" }, "sms" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4203,6 +5031,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4352,6 +5181,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4361,6 +5191,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4402,6 +5233,7 @@ "sslCommonName" : "{region}.queue.{dnsSuffix}" }, "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4411,6 +5243,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4450,6 +5283,7 @@ }, "ssm" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4459,6 +5293,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4520,6 +5355,7 @@ }, "states" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4529,6 +5365,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4566,6 +5403,7 @@ }, "storagegateway" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, 
"ap-northeast-2" : { }, @@ -4575,6 +5413,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4652,6 +5491,7 @@ }, "sts" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4667,6 +5507,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4716,6 +5557,7 @@ }, "swf" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4725,6 +5567,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4762,6 +5605,7 @@ }, "tagging" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -4771,6 +5615,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -4921,6 +5766,12 @@ }, "waf-regional" : { "endpoints" : { + "ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "waf-regional.ap-east-1.amazonaws.com" + }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -4987,7 +5838,13 @@ }, "hostname" : "waf-regional.eu-west-3.amazonaws.com" }, - "fips-ap-northeast-1" : { + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "hostname" : "waf-regional-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" }, @@ -5053,6 +5910,12 @@ }, "hostname" : "waf-regional-fips.eu-west-3.amazonaws.com" }, + "fips-me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "waf-regional-fips.me-south-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" 
@@ -5083,6 +5946,12 @@ }, "hostname" : "waf-regional-fips.us-west-2.amazonaws.com" }, + "me-south-1" : { + "credentialScope" : { + "region" : "me-south-1" + }, + "hostname" : "waf-regional.me-south-1.amazonaws.com" + }, "sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -5121,6 +5990,18 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-west-1" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "workdocs-fips.us-east-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "workdocs-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-west-2" : { } } @@ -5152,6 +6033,7 @@ }, "xray" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -5161,6 +6043,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -5214,6 +6097,12 @@ } } }, + "api.sagemaker" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "apigateway" : { "endpoints" : { "cn-north-1" : { }, @@ -5236,6 +6125,7 @@ }, "athena" : { "endpoints" : { + "cn-north-1" : { }, "cn-northwest-1" : { } } }, @@ -5248,6 +6138,15 @@ "cn-northwest-1" : { } } }, + "autoscaling-plans" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "backup" : { "endpoints" : { "cn-north-1" : { }, @@ -5260,6 +6159,30 @@ "cn-northwest-1" : { } } }, + "budgets" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "budgets.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "ce" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "ce.cn-northwest-1.amazonaws.com.cn" + } + }, + 
"isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, "cloudformation" : { "endpoints" : { "cn-north-1" : { }, @@ -5291,6 +6214,12 @@ "cn-northwest-1" : { } } }, + "codecommit" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "codedeploy" : { "endpoints" : { "cn-north-1" : { }, @@ -5367,6 +6296,15 @@ "cn-northwest-1" : { } } }, + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "elasticache" : { "endpoints" : { "cn-north-1" : { }, @@ -5382,7 +6320,19 @@ "elasticfilesystem" : { "endpoints" : { "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-northwest-1" : { }, + "fips-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" + } } }, "elasticloadbalancing" : { @@ -5437,6 +6387,7 @@ }, "glue" : { "endpoints" : { + "cn-north-1" : { }, "cn-northwest-1" : { } } }, @@ -5484,12 +6435,24 @@ "cn-northwest-1" : { } } }, + "kafka" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "kinesis" : { "endpoints" : { "cn-north-1" : { }, "cn-northwest-1" : { } } }, + "kinesisanalytics" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "kms" : { "endpoints" : { "cn-north-1" : { }, @@ -5543,6 +6506,24 @@ } } }, + "organizations" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "organizations.cn-northwest-1.amazonaws.com.cn" + }, + "fips-aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "organizations.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, "polly" : { "endpoints" : { 
"cn-northwest-1" : { } @@ -5560,6 +6541,24 @@ "cn-northwest-1" : { } } }, + "route53" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "route53.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, + "runtime.sagemaker" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "s3" : { "defaults" : { "protocols" : [ "http", "https" ], @@ -5620,11 +6619,18 @@ "snowball" : { "endpoints" : { "cn-north-1" : { }, + "cn-northwest-1" : { }, "fips-cn-north-1" : { "credentialScope" : { "region" : "cn-north-1" }, "hostname" : "snowball-fips.cn-north-1.amazonaws.com.cn" + }, + "fips-cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "snowball-fips.cn-northwest-1.amazonaws.com.cn" } } }, @@ -5752,7 +6758,7 @@ "description" : "AWS GovCloud (US-East)" }, "us-gov-west-1" : { - "description" : "AWS GovCloud (US)" + "description" : "AWS GovCloud (US-West)" } }, "services" : { @@ -5773,6 +6779,18 @@ "protocols" : [ "https" ] }, "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "acm-pca.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "acm-pca.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -5807,7 +6825,19 @@ }, "api.sagemaker" : { "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api-fips.sagemaker.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1-fips-secondary" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "api.sagemaker.us-gov-west-1.amazonaws.com" + } } }, "apigateway" : { @@ -5862,7 +6892,9 @@ }, "autoscaling" : { "endpoints" : { - "us-gov-east-1" : { }, + "us-gov-east-1" : 
{ + "protocols" : [ "http", "https" ] + }, "us-gov-west-1" : { "protocols" : [ "http", "https" ] } @@ -5877,20 +6909,28 @@ "us-gov-west-1" : { } } }, + "backup" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "batch" : { "endpoints" : { - "us-gov-east-1" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, "hostname" : "batch.us-gov-east-1.amazonaws.com" }, - "us-gov-west-1" : { + "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" }, "hostname" : "batch.us-gov-west-1.amazonaws.com" - } + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } } }, "clouddirectory" : { @@ -5900,8 +6940,18 @@ }, "cloudformation" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "cloudformation.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cloudformation.us-gov-west-1.amazonaws.com" + } } }, "cloudhsm" : { @@ -5922,8 +6972,18 @@ }, "cloudtrail" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "cloudtrail.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cloudtrail.us-gov-west-1.amazonaws.com" + } } }, "codebuild" : { @@ -5974,6 +7034,33 @@ } } }, + "codepipeline" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "codepipeline-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, + "cognito-identity" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "cognito-idp" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : 
"cognito-idp-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, "comprehend" : { "defaults" : { "protocols" : [ "https" ] @@ -5990,6 +7077,12 @@ }, "comprehendmedical" : { "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "comprehendmedical-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-west-1" : { } } }, @@ -6030,8 +7123,18 @@ }, "directconnect" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "directconnect.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "directconnect.us-gov-west-1.amazonaws.com" + } } }, "dms" : { @@ -6046,20 +7149,42 @@ "us-gov-west-1" : { } } }, - "ds" : { + "docdb" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + } } }, - "dynamodb" : { + "ds" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-east-1-fips" : { + "fips-us-gov-east-1" : { "credentialScope" : { "region" : "us-gov-east-1" }, - "hostname" : "dynamodb.us-gov-east-1.amazonaws.com" + "hostname" : "ds-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ds-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "dynamodb" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "dynamodb.us-gov-east-1.amazonaws.com" }, "us-gov-west-1" : { }, "us-gov-west-1-fips" : { @@ -6072,11 +7197,42 @@ }, "ec2" : { "endpoints" : { + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : 
"ec2.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ec2.us-gov-west-1.amazonaws.com" + } + } + }, + "ecs" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ecs-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ecs-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, - "ecs" : { + "eks" : { + "defaults" : { + "protocols" : [ "http", "https" ] + }, "endpoints" : { "us-gov-east-1" : { }, "us-gov-west-1" : { } @@ -6088,7 +7244,7 @@ "credentialScope" : { "region" : "us-gov-west-1" }, - "hostname" : "elasticache-fips.us-gov-west-1.amazonaws.com" + "hostname" : "elasticache.us-gov-west-1.amazonaws.com" }, "us-gov-east-1" : { }, "us-gov-west-1" : { } @@ -6112,12 +7268,36 @@ }, "elasticfilesystem" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "elasticfilesystem-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "elasticfilesystem-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, "elasticloadbalancing" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { "protocols" : [ "http", "https" ] @@ -6126,12 +7306,35 @@ }, "elasticmapreduce" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : 
"elasticmapreduce.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "elasticmapreduce.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { "protocols" : [ "https" ] } } }, + "email" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "email-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, "es" : { "endpoints" : { "fips" : { @@ -6146,8 +7349,18 @@ }, "events" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "events.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "events.us-gov-west-1.amazonaws.com" + } } }, "firehose" : { @@ -6187,6 +7400,18 @@ }, "glue" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "glue-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "glue-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -6196,7 +7421,12 @@ "protocols" : [ "https" ] }, "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "greengrass.us-gov-west-1.amazonaws.com" + } }, "isRegionalized" : true }, @@ -6205,7 +7435,13 @@ "protocols" : [ "https" ] }, "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "guardduty.us-gov-west-1.amazonaws.com" + } }, "isRegionalized" : true }, @@ -6221,6 +7457,12 @@ "region" : "us-gov-west-1" }, "hostname" : "iam.us-gov.amazonaws.com" + }, + "iam-govcloud-fips" : { + 
"credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "iam.us-gov.amazonaws.com" } }, "isRegionalized" : false, @@ -6259,7 +7501,31 @@ "us-gov-west-1" : { } } }, + "kafka" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "kinesis" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "kinesis-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kinesis-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "kinesisanalytics" : { "endpoints" : { "us-gov-east-1" : { }, "us-gov-west-1" : { } @@ -6279,25 +7545,64 @@ }, "lambda" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "lambda-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "lambda-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, "license-manager" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "license-manager-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "license-manager-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, "logs" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "logs.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "logs.us-gov-west-1.amazonaws.com" + } } }, "mediaconvert" : { "endpoints" : { - "us-gov-west-1" : { } + "us-gov-west-1" : { + "credentialScope" : { + 
"region" : "us-gov-west-1" + }, + "hostname" : "mediaconvert.us-gov-west-1.amazonaws.com" + } } }, "metering.marketplace" : { @@ -6313,6 +7618,18 @@ }, "monitoring" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "monitoring.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "monitoring.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -6340,6 +7657,12 @@ "region" : "us-gov-west-1" }, "hostname" : "organizations.us-gov-west-1.amazonaws.com" + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "organizations.us-gov-west-1.amazonaws.com" } }, "isRegionalized" : false, @@ -6347,7 +7670,27 @@ }, "outposts" : { "endpoints" : { - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "outposts.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "outposts.us-gov-west-1.amazonaws.com" + } + } + }, + "pinpoint" : { + "defaults" : { + "credentialScope" : { + "service" : "mobiletargeting" + } + }, + "endpoints" : { "us-gov-west-1" : { } } }, @@ -6370,6 +7713,18 @@ }, "rds" : { "endpoints" : { + "rds.us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "rds.us-gov-east-1.amazonaws.com" + }, + "rds.us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rds.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -6392,6 +7747,12 @@ }, "rekognition" : { "endpoints" : { + "rekognition-fips.us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "rekognition-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-west-1" : { } } }, @@ -6511,15 +7872,41 @@ } } }, + 
"securityhub" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "securityhub-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "securityhub-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] }, "endpoints" : { "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "serverlessrepo.us-gov-east-1.amazonaws.com", "protocols" : [ "https" ] }, "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "serverlessrepo.us-gov-west-1.amazonaws.com", "protocols" : [ "https" ] } } @@ -6580,16 +7967,34 @@ }, "sns" : { "endpoints" : { - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sns.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sns.us-gov-west-1.amazonaws.com", "protocols" : [ "http", "https" ] } } }, "sqs" : { "endpoints" : { - "us-gov-east-1" : { }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sqs.us-gov-east-1.amazonaws.com" + }, "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sqs.us-gov-west-1.amazonaws.com", "protocols" : [ "http", "https" ], "sslCommonName" : "{region}.queue.{dnsSuffix}" } @@ -6597,6 +8002,30 @@ }, "ssm" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ssm.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ssm.us-gov-west-1.amazonaws.com" + }, + "ssm-facade-fips-us-gov-east-1" : { + "credentialScope" : { + "region" : 
"us-gov-east-1" + }, + "hostname" : "ssm-facade.us-gov-east-1.amazonaws.com" + }, + "ssm-facade-fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ssm-facade.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -6621,6 +8050,12 @@ }, "storagegateway" : { "endpoints" : { + "fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "storagegateway-fips.us-gov-west-1.amazonaws.com" + }, "us-gov-east-1" : { }, "us-gov-west-1" : { } } @@ -6651,7 +8086,19 @@ "sts" : { "endpoints" : { "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "sts.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "sts.us-gov-west-1.amazonaws.com" + } } }, "support" : { @@ -6661,6 +8108,12 @@ "region" : "us-gov-west-1" }, "hostname" : "support.us-gov-west-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "support.us-gov-west-1.amazonaws.com" } }, "partitionEndpoint" : "aws-us-gov-global" @@ -6816,6 +8269,14 @@ "us-iso-east-1" : { } } }, + "comprehend" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, "config" : { "endpoints" : { "us-iso-east-1" : { } @@ -6883,6 +8344,11 @@ } } }, + "es" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "events" : { "endpoints" : { "us-iso-east-1" : { } @@ -7184,6 +8650,11 @@ "us-isob-east-1" : { } } }, + "lambda" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "license-manager" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/core/regions/src/test/java/software/amazon/awssdk/regions/PartitionServiceMetadataTest.java b/core/regions/src/test/java/software/amazon/awssdk/regions/PartitionServiceMetadataTest.java index 
e7d7cee6826a..d82312850906 100644 --- a/core/regions/src/test/java/software/amazon/awssdk/regions/PartitionServiceMetadataTest.java +++ b/core/regions/src/test/java/software/amazon/awssdk/regions/PartitionServiceMetadataTest.java @@ -25,7 +25,7 @@ public class PartitionServiceMetadataTest { private static final List AWS_PARTITION_GLOBAL_SERVICES = Arrays.asList( - "budgets", "cloudfront", "iam", "route53", "waf"); + "budgets", "cloudfront", "iam", "route53", "shield", "waf"); private static final List AWS_PARTITION_REGIONALIZED_SERVICES = Arrays.asList( "acm", "apigateway", "application-autoscaling", "appstream2", "autoscaling", "batch", @@ -37,7 +37,7 @@ public class PartitionServiceMetadataTest { "iot", "kinesis", "kinesisanalytics", "kms", "lambda", "lightsail", "logs", "machinelearning", "marketplacecommerceanalytics", "metering.marketplace", "mobileanalytics", "monitoring", "opsworks", "opsworks-cm", "pinpoint", "polly", "rds", "redshift", "rekognition", "route53domains", "s3", - "sdb", "servicecatalog", "shield", "sms", "snowball", "sns", "sqs", "ssm", "states", "storagegateway", + "sdb", "servicecatalog", "sms", "snowball", "sns", "sqs", "ssm", "states", "storagegateway", "streams.dynamodb", "sts", "support", "swf", "waf-regional", "workspaces", "xray"); private static final List AWS_CN_PARTITION_GLOBAL_SERVICES = Arrays.asList("iam"); @@ -58,32 +58,38 @@ public class PartitionServiceMetadataTest { @Test public void endpointFor_ReturnsEndpoint_ForAllRegionalizedServices_When_AwsPartition() { - AWS_PARTITION_REGIONALIZED_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.US_EAST_1)); + AWS_PARTITION_REGIONALIZED_SERVICES.forEach( + s -> assertThat(ServiceMetadata.of(s).endpointFor(Region.US_EAST_1)).isNotNull()); } @Test public void endpointFor_ReturnsEndpoint_ForAllGlobalServices_When_AwsGlobalRegion() { - AWS_PARTITION_GLOBAL_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.AWS_GLOBAL)); + AWS_PARTITION_GLOBAL_SERVICES.forEach( + s 
-> assertThat(ServiceMetadata.of(s).endpointFor(Region.AWS_GLOBAL)).isNotNull()); } @Test public void endpointFor_ReturnsEndpoint_ForAllRegionalizedServices_When_AwsCnPartition() { - AWS_CN_PARTITION_REGIONALIZED_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.CN_NORTH_1)); + AWS_CN_PARTITION_REGIONALIZED_SERVICES.forEach( + s -> assertThat(ServiceMetadata.of(s).endpointFor(Region.CN_NORTH_1)).isNotNull()); } @Test public void endpointFor_ReturnsEndpoint_ForAllGlobalServices_When_AwsCnGlobalRegion() { - AWS_CN_PARTITION_GLOBAL_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.AWS_CN_GLOBAL)); + AWS_CN_PARTITION_GLOBAL_SERVICES.forEach( + s -> assertThat(ServiceMetadata.of(s).endpointFor(Region.AWS_CN_GLOBAL)).isNotNull()); } @Test public void endpointFor_ReturnsEndpoint_ForAllRegionalizedServices_When_AwsUsGovPartition() { - AWS_US_GOV_PARTITION_REGIONALIZED_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.US_GOV_WEST_1)); + AWS_US_GOV_PARTITION_REGIONALIZED_SERVICES.forEach( + s -> assertThat(ServiceMetadata.of(s).endpointFor(Region.US_GOV_WEST_1)).isNotNull()); } @Test public void endpointFor_ReturnsEndpoint_ForAllGlobalServices_When_AwsUsGovGlobalRegion() { - AWS_US_GOV_PARTITION_GLOBAL_SERVICES.forEach(s -> ServiceMetadata.of(s).endpointFor(Region.AWS_US_GOV_GLOBAL)); + AWS_US_GOV_PARTITION_GLOBAL_SERVICES.forEach( + s -> assertThat(ServiceMetadata.of(s).endpointFor(Region.AWS_US_GOV_GLOBAL)).isNotNull()); } @Test diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index e4fcb78ac5da..9c2474cac430 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sdk-core AWS Java SDK :: SDK Core @@ -41,6 +41,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk utils @@ -90,6 +95,11 @@ guava test + + org.apache.commons + commons-lang3 + test + log4j log4j @@ 
-167,6 +177,11 @@ ${awsjavasdk.version} test + + io.reactivex.rxjava2 + rxjava + test + diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java index 2800ba0ebc91..dd0e8e0bb8a5 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java @@ -26,8 +26,10 @@ import java.util.TreeMap; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkPreviewApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.awssdk.utils.Validate; @@ -44,6 +46,7 @@ public abstract class RequestOverrideConfiguration { private final Duration apiCallTimeout; private final Duration apiCallAttemptTimeout; private final Signer signer; + private final List metricPublishers; protected RequestOverrideConfiguration(Builder builder) { this.headers = CollectionUtils.deepUnmodifiableMap(builder.headers(), () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); @@ -52,6 +55,7 @@ protected RequestOverrideConfiguration(Builder builder) { this.apiCallTimeout = Validate.isPositiveOrNull(builder.apiCallTimeout(), "apiCallTimeout"); this.apiCallAttemptTimeout = Validate.isPositiveOrNull(builder.apiCallAttemptTimeout(), "apiCallAttemptTimeout"); this.signer = builder.signer(); + this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); } /** @@ -127,6 +131,14 @@ public Optional signer() { return Optional.ofNullable(signer); } + /** + * Return the metric publishers for publishing the metrics collected for this request. 
This list supersedes the + * metric publishers set on the client. + */ + public List metricPublishers() { + return metricPublishers; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -141,7 +153,8 @@ public boolean equals(Object o) { Objects.equals(apiNames, that.apiNames) && Objects.equals(apiCallTimeout, that.apiCallTimeout) && Objects.equals(apiCallAttemptTimeout, that.apiCallAttemptTimeout) && - Objects.equals(signer, that.signer); + Objects.equals(signer, that.signer) && + Objects.equals(metricPublishers, that.metricPublishers); } @Override @@ -153,6 +166,7 @@ public int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(apiCallTimeout); hashCode = 31 * hashCode + Objects.hashCode(apiCallAttemptTimeout); hashCode = 31 * hashCode + Objects.hashCode(signer); + hashCode = 31 * hashCode + Objects.hashCode(metricPublishers); return hashCode; } @@ -339,6 +353,32 @@ default B putRawQueryParameter(String name, String value) { Signer signer(); + /** + * Sets the metric publishers for publishing the metrics collected for this request. This list supersedes + * the metric publisher set on the client. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + * + * @param metricPublisher The list metric publisher for this request. + * @return This object for method chaining. + */ + @SdkPreviewApi + B metricPublishers(List metricPublisher); + + /** + * Add a metric publisher to the existing list of previously set publishers to be used for publishing metrics + * for this request. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + * + * @param metricPublisher The metric publisher to add. + */ + @SdkPreviewApi + B addMetricPublisher(MetricPublisher metricPublisher); + + @SdkPreviewApi + List metricPublishers(); + /** * Create a new {@code SdkRequestOverrideConfiguration} with the properties set on this builder. 
* @@ -348,12 +388,13 @@ default B putRawQueryParameter(String name, String value) { } protected abstract static class BuilderImpl implements Builder { - private Map> headers = new HashMap<>(); + private Map> headers = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); private Map> rawQueryParameters = new HashMap<>(); private List apiNames = new ArrayList<>(); private Duration apiCallTimeout; private Duration apiCallAttemptTimeout; private Signer signer; + private List metricPublishers = new ArrayList<>(); protected BuilderImpl() { } @@ -366,7 +407,7 @@ protected BuilderImpl(RequestOverrideConfiguration sdkRequestOverrideConfig) { @Override public Map> headers() { - return CollectionUtils.deepUnmodifiableMap(headers, () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); + return CollectionUtils.unmodifiableMapOfLists(headers); } @Override @@ -386,7 +427,7 @@ public B headers(Map> headers) { @Override public Map> rawQueryParameters() { - return CollectionUtils.deepUnmodifiableMap(rawQueryParameters); + return CollectionUtils.unmodifiableMapOfLists(rawQueryParameters); } @Override @@ -470,5 +511,28 @@ public void setSigner(Signer signer) { public Signer signer() { return signer; } + + @Override + public B metricPublishers(List metricPublishers) { + Validate.paramNotNull(metricPublishers, "metricPublishers"); + this.metricPublishers = new ArrayList<>(metricPublishers); + return (B) this; + } + + @Override + public B addMetricPublisher(MetricPublisher metricPublisher) { + Validate.paramNotNull(metricPublisher, "metricPublisher"); + this.metricPublishers.add(metricPublisher); + return (B) this; + } + + public void setMetricPublishers(List metricPublishers) { + metricPublishers(metricPublishers); + } + + @Override + public List metricPublishers() { + return metricPublishers; + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkPojoBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkPojoBuilder.java new file mode 100644 index 
000000000000..7960092c1919 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkPojoBuilder.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.Buildable; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A builder for an immutable {@link SdkPojo} with no fields. + * + *

+ * This is useful for {@code SdkPojo} implementations that don't have their own builders, but need to be passed to something + * that assumes they already have a builder. For example, marshallers expect all {@code SdkPojo} implementations to have a + * builder. In the cases that they do not, this can be used as their builder. + * + *

+ * This currently only supports {@code SdkPojo}s without any fields (because it has no way to set them). It also does not support + * {@code SdkPojo}s that already have or are a builder (that builder should be used instead). + */ +@SdkProtectedApi +public final class SdkPojoBuilder implements SdkPojo, Buildable { + private final T delegate; + + public SdkPojoBuilder(T delegate) { + Validate.isTrue(delegate.sdkFields().isEmpty(), "Delegate must be empty."); + Validate.isTrue(!(delegate instanceof ToCopyableBuilder), "Delegate already has a builder."); + Validate.isTrue(!(delegate instanceof Buildable), "Delegate is already a builder."); + this.delegate = delegate; + } + + @Override + public List> sdkFields() { + return Collections.emptyList(); + } + + @Override + public boolean equalsBySdkFields(Object other) { + return delegate.equalsBySdkFields(other); + } + + @Override + public T build() { + return delegate; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 0d66d6869113..5b3eaaf70e80 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -20,7 +20,6 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Path; -import java.util.Arrays; import java.util.Optional; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; @@ -138,7 +137,7 @@ static AsyncRequestBody fromString(String string) { * @return AsyncRequestBody instance. 
*/ static AsyncRequestBody fromBytes(byte[] bytes) { - return new ByteArrayAsyncRequestBody(Arrays.copyOf(bytes, bytes.length)); + return new ByteArrayAsyncRequestBody(bytes); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 1be24c8bac0c..6725771e737b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -28,6 +28,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.ASYNC_HTTP_CLIENT; import static software.amazon.awssdk.core.client.config.SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED; import static software.amazon.awssdk.core.client.config.SdkClientOption.EXECUTION_INTERCEPTORS; +import static software.amazon.awssdk.core.client.config.SdkClientOption.METRIC_PUBLISHERS; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_FILE; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_NAME; import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_POLICY; @@ -66,6 +67,7 @@ import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.utils.AttributeMap; @@ -364,6 +366,7 @@ public final B overrideConfiguration(ClientOverrideConfiguration overrideConfig) overrideConfig.advancedOption(DISABLE_HOST_PREFIX_INJECTION).orElse(null)); clientConfiguration.option(PROFILE_FILE, overrideConfig.defaultProfileFile().orElse(null)); 
clientConfiguration.option(PROFILE_NAME, overrideConfig.defaultProfileName().orElse(null)); + clientConfiguration.option(METRIC_PUBLISHERS, overrideConfig.metricPublishers()); return thisBuilder(); } @@ -391,6 +394,11 @@ public final B httpClientBuilder(SdkAsyncHttpClient.Builder httpClientBuilder) { return thisBuilder(); } + public final B metricPublishers(List metricPublishers) { + clientConfiguration.option(METRIC_PUBLISHERS, metricPublishers); + return thisBuilder(); + } + /** * Return "this" for method chaining. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index a9657812d55a..d3ef55915d95 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -24,11 +24,13 @@ import java.util.Optional; import java.util.TreeMap; import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPreviewApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.utils.AttributeMap; @@ -55,6 +57,7 @@ public final class ClientOverrideConfiguration private final Duration apiCallTimeout; private final ProfileFile defaultProfileFile; private final String defaultProfileName; + private final List metricPublishers; /** * Initialize this configuration. Private to require use of {@link #builder()}. 
@@ -68,6 +71,7 @@ private ClientOverrideConfiguration(Builder builder) { this.apiCallAttemptTimeout = Validate.isPositiveOrNull(builder.apiCallAttemptTimeout(), "apiCallAttemptTimeout"); this.defaultProfileFile = builder.defaultProfileFile(); this.defaultProfileName = builder.defaultProfileName(); + this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); } @Override @@ -184,6 +188,15 @@ public Optional defaultProfileName() { return Optional.ofNullable(defaultProfileName); } + /** + * The metric publishers to use to publish metrics collected for this client. + * + * @return The metric publishers. + */ + public List metricPublishers() { + return metricPublishers; + } + @Override public String toString() { return ToString.builder("ClientOverrideConfiguration") @@ -408,6 +421,31 @@ default Builder retryPolicy(RetryMode retryMode) { Builder defaultProfileName(String defaultProfileName); String defaultProfileName(); + + /** + * Set the Metric publishers to be used to publish metrics for this client. This overwrites the current list of + * metric publishers set on the builder. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + * + * @param metricPublishers The metric publishers. + */ + @SdkPreviewApi + Builder metricPublishers(List metricPublishers); + + /** + * Add a metric publisher to the existing list of previously set publishers to be used for publishing metrics + * for this client. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + * + * @param metricPublisher The metric publisher to add. 
+ */ + @SdkPreviewApi + Builder addMetricPublisher(MetricPublisher metricPublisher); + + @SdkPreviewApi + List metricPublishers(); } /** @@ -422,6 +460,7 @@ private static final class DefaultClientOverrideConfigurationBuilder implements private Duration apiCallAttemptTimeout; private ProfileFile defaultProfileFile; private String defaultProfileName; + private List metricPublishers = new ArrayList<>(); @Override public Builder headers(Map> headers) { @@ -436,7 +475,7 @@ public void setHeaders(Map> additionalHttpHeaders) { @Override public Map> headers() { - return CollectionUtils.deepUnmodifiableMap(headers); + return CollectionUtils.unmodifiableMapOfLists(headers); } @Override @@ -563,6 +602,29 @@ public Builder defaultProfileName(String defaultProfileName) { return this; } + @Override + public Builder metricPublishers(List metricPublishers) { + Validate.paramNotNull(metricPublishers, "metricPublishers"); + this.metricPublishers = new ArrayList<>(metricPublishers); + return this; + } + + public void setMetricPublishers(List metricPublishers) { + metricPublishers(metricPublishers); + } + + @Override + public Builder addMetricPublisher(MetricPublisher metricPublisher) { + Validate.paramNotNull(metricPublisher, "metricPublisher"); + this.metricPublishers.add(metricPublisher); + return this; + } + + @Override + public List metricPublishers() { + return Collections.unmodifiableList(metricPublishers); + } + @Override public ClientOverrideConfiguration build() { return new ClientOverrideConfiguration(this); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 11c26163287d..ad422d83070e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -27,6 +27,7 @@ import 
software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.profiles.ProfileFile; /** @@ -129,6 +130,9 @@ public final class SdkClientOption extends ClientOption { */ public static final SdkClientOption PROFILE_NAME = new SdkClientOption<>(String.class); + public static final SdkClientOption> METRIC_PUBLISHERS = + new SdkClientOption<>(new UnsafeValueType(List.class)); + private SdkClientOption(Class valueClass) { super(valueClass); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java index 3d4b03dda61f..1b5320886fd2 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.core.http.HttpResponseHandler; import software.amazon.awssdk.core.runtime.transform.Marshaller; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.metrics.MetricCollector; /** * Encapsulates parameters needed for a particular API call. Captures input and output pojo types. 
@@ -47,6 +48,7 @@ public final class ClientExecutionParams { private String hostPrefixExpression; private String operationName; private URI discoveredEndpoint; + private MetricCollector metricCollector; public Marshaller getMarshaller() { return marshaller; @@ -166,4 +168,13 @@ public ClientExecutionParams discoveredEndpoint(URI discoveredE this.discoveredEndpoint = discoveredEndpoint; return this; } + + public ClientExecutionParams withMetricCollector(MetricCollector metricCollector) { + this.metricCollector = metricCollector; + return this; + } + + public MetricCollector getMetricCollector() { + return metricCollector; + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryCacheLoader.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryCacheLoader.java index d740af90701d..5f9279c9f60a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryCacheLoader.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryCacheLoader.java @@ -18,10 +18,10 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.concurrent.CompletableFuture; -import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.exception.SdkClientException; -@SdkInternalApi +@SdkProtectedApi public interface EndpointDiscoveryCacheLoader { CompletableFuture discoverEndpoint(EndpointDiscoveryRequest endpointDiscoveryRequest); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryFailedException.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryFailedException.java new file mode 100644 index 000000000000..91ba0a8701ec --- /dev/null +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryFailedException.java @@ -0,0 +1,88 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.endpointdiscovery; + +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.Validate; + +/** + * This exception is thrown when the SDK was unable to retrieve an endpoint from AWS. The cause describes what specific part of + * the endpoint discovery process failed. 
+ */ +@SdkPublicApi +public class EndpointDiscoveryFailedException extends SdkClientException { + + private static final long serialVersionUID = 1L; + + private EndpointDiscoveryFailedException(Builder b) { + super(b); + Validate.paramNotNull(b.cause(), "cause"); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public static EndpointDiscoveryFailedException create(Throwable cause) { + return builder().message("Failed when retrieving a required endpoint from AWS.") + .cause(cause) + .build(); + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public interface Builder extends SdkClientException.Builder { + @Override + Builder message(String message); + + @Override + Builder cause(Throwable cause); + + @Override + EndpointDiscoveryFailedException build(); + } + + protected static final class BuilderImpl extends SdkClientException.BuilderImpl implements Builder { + + protected BuilderImpl() { + } + + protected BuilderImpl(EndpointDiscoveryFailedException ex) { + super(ex); + } + + @Override + public Builder message(String message) { + this.message = message; + return this; + } + + @Override + public Builder cause(Throwable cause) { + this.cause = cause; + return this; + } + + @Override + public EndpointDiscoveryFailedException build() { + return new EndpointDiscoveryFailedException(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryRefreshCache.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryRefreshCache.java index 3730803d55b1..982b3427216d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryRefreshCache.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/endpointdiscovery/EndpointDiscoveryRefreshCache.java @@ -20,14 +20,11 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import 
java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.utils.Logger; @SdkProtectedApi public final class EndpointDiscoveryRefreshCache { - - private static final Logger log = Logger.loggerFor(EndpointDiscoveryRefreshCache.class); - private final Map cache = new ConcurrentHashMap<>(); private final EndpointDiscoveryCacheLoader client; @@ -63,7 +60,7 @@ public URI get(String accessKey, EndpointDiscoveryRequest request) { if (endpoint == null) { if (request.required()) { - return cache.computeIfAbsent(key, k -> discoverEndpoint(request).join()).endpoint(); + return cache.computeIfAbsent(key, k -> getAndJoin(request)).endpoint(); } else { EndpointDiscoveryEndpoint tempEndpoint = EndpointDiscoveryEndpoint.builder() .endpoint(request.defaultEndpoint()) @@ -90,6 +87,17 @@ public URI get(String accessKey, EndpointDiscoveryRequest request) { return endpoint.endpoint(); } + private EndpointDiscoveryEndpoint getAndJoin(EndpointDiscoveryRequest request) { + try { + return discoverEndpoint(request).get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw EndpointDiscoveryFailedException.create(e); + } catch (ExecutionException e) { + throw EndpointDiscoveryFailedException.create(e.getCause()); + } + } + private void refreshCacheAsync(EndpointDiscoveryRequest request, String key) { discoverEndpoint(request).thenApply(v -> cache.put(key, v)); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java index ddf13cf5a15f..134e0b501f25 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/exception/SdkServiceException.java @@ -41,11 +41,13 @@ public class SdkServiceException 
extends SdkException implements SdkPojo { private final String requestId; + private final String extendedRequestId; private final int statusCode; protected SdkServiceException(Builder b) { super(b); this.requestId = b.requestId(); + this.extendedRequestId = b.extendedRequestId(); this.statusCode = b.statusCode(); } @@ -57,6 +59,14 @@ public String requestId() { return requestId; } + /** + * The extendedRequestId that was returned by the called service. + * @return String containing the extendedRequestId + */ + public String extendedRequestId() { + return extendedRequestId; + } + /** * The status code that was returned by the called service. * @return int containing the status code. @@ -127,6 +137,21 @@ public interface Builder extends SdkException.Builder, SdkPojo { */ String requestId(); + /** + * Specifies the extendedRequestId returned by the called service. + * + * @param extendedRequestId A string that identifies the request made to a service. + * @return This object for method chaining. + */ + Builder extendedRequestId(String extendedRequestId); + + /** + * The extendedRequestId returned by the called service. + * + * @return String containing the extendedRequestId + */ + String extendedRequestId(); + + /** + * Specifies the status code returned by the service. 
* @@ -153,6 +178,7 @@ public interface Builder extends SdkException.Builder, SdkPojo { protected static class BuilderImpl extends SdkException.BuilderImpl implements Builder { protected String requestId; + protected String extendedRequestId; protected int statusCode; protected BuilderImpl() { @@ -161,6 +187,7 @@ protected BuilderImpl() { protected BuilderImpl(SdkServiceException ex) { super(ex); this.requestId = ex.requestId(); + this.extendedRequestId = ex.extendedRequestId(); this.statusCode = ex.statusCode(); } @@ -182,6 +209,12 @@ public Builder requestId(String requestId) { return this; } + @Override + public Builder extendedRequestId(String extendedRequestId) { + this.extendedRequestId = extendedRequestId; + return this; + } + @Override public String requestId() { return requestId; @@ -195,6 +228,19 @@ public void setRequestId(String requestId) { this.requestId = requestId; } + @Override + public String extendedRequestId() { + return extendedRequestId; + } + + public String getExtendedRequestId() { + return extendedRequestId; + } + + public void setExtendedRequestId(String extendedRequestId) { + this.extendedRequestId = extendedRequestId; + } + @Override public Builder statusCode(int statusCode) { this.statusCode = statusCode; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/ExecutionContext.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/ExecutionContext.java index e9375a75e14d..37fb230b5c9c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/ExecutionContext.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/ExecutionContext.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionInterceptorChain; import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.builder.CopyableBuilder; import 
software.amazon.awssdk.utils.builder.ToCopyableBuilder; @@ -34,12 +35,14 @@ public final class ExecutionContext implements ToCopyableBuilder { - String X_AMZN_REQUEST_ID_HEADER = "x-amzn-RequestId"; - + String X_AMZN_REQUEST_ID_HEADER_ALTERNATE = "x-amz-request-id"; + Set X_AMZN_REQUEST_ID_HEADERS = Collections.unmodifiableSet(Stream.of(X_AMZN_REQUEST_ID_HEADER, + X_AMZN_REQUEST_ID_HEADER_ALTERNATE) + .collect(Collectors.toSet())); String X_AMZ_ID_2_HEADER = "x-amz-id-2"; /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/MetricCollectingHttpResponseHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/MetricCollectingHttpResponseHandler.java new file mode 100644 index 000000000000..5a801c865bd4 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/http/MetricCollectingHttpResponseHandler.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.http; + +import java.time.Duration; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.utils.Pair; + +/** + * An implementation of {@link HttpResponseHandler} that publishes the time it took to execute { + * @link #handle(SdkHttpFullResponse, ExecutionAttributes)} as the provided duration metric to the + * {@link SdkExecutionAttribute#API_CALL_ATTEMPT_METRIC_COLLECTOR}. + */ +@SdkProtectedApi +public final class MetricCollectingHttpResponseHandler implements HttpResponseHandler { + public final SdkMetric metric; + public final HttpResponseHandler delegateToTime; + + private MetricCollectingHttpResponseHandler(SdkMetric durationMetric, + HttpResponseHandler delegateToTime) { + this.metric = durationMetric; + this.delegateToTime = delegateToTime; + } + + public static MetricCollectingHttpResponseHandler create(SdkMetric durationMetric, + HttpResponseHandler delegateToTime) { + return new MetricCollectingHttpResponseHandler<>(durationMetric, delegateToTime); + } + + @Override + public T handle(SdkHttpFullResponse response, ExecutionAttributes executionAttributes) throws Exception { + Pair result = MetricUtils.measureDurationUnsafe(() -> delegateToTime.handle(response, executionAttributes)); + + collector(executionAttributes).ifPresent(c -> c.reportMetric(metric, result.right())); + + return result.left(); + } + + private Optional collector(ExecutionAttributes attributes) { + if (attributes == null) { + return Optional.empty(); + } + + return 
Optional.ofNullable(attributes.getAttribute(SdkExecutionAttribute.API_CALL_ATTEMPT_METRIC_COLLECTOR)); + } + + @Override + public boolean needsConnectionLeftOpen() { + return delegateToTime.needsConnectionLeftOpen(); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 37b83a6a893e..6aa85ae6c70f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -19,6 +19,7 @@ import software.amazon.awssdk.core.ClientType; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.metrics.MetricCollector; /** * Contains attributes attached to the execution. This information is available to {@link ExecutionInterceptor}s and @@ -46,6 +47,13 @@ public class SdkExecutionAttribute { public static final ExecutionAttribute OPERATION_NAME = new ExecutionAttribute<>("OperationName"); + /** + * The {@link MetricCollector} associated with the current, ongoing API call attempt. This is not set until the actual + * internal API call attempt starts. + */ + public static final ExecutionAttribute API_CALL_ATTEMPT_METRIC_COLLECTOR = + new ExecutionAttribute<>("ApiCallAttemptMetricCollector"); + /** * If true indicates that the configured endpoint of the client is a value that was supplied as an override and not * generated from regional metadata. 
diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java index b6843860a056..d342656dbb95 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java @@ -143,12 +143,12 @@ public void completed(Integer result, ByteBuffer attachment) { performWrite(byteBuffer); } else { synchronized (FileSubscriber.this) { + writeInProgress = false; if (closeOnLastWrite) { close(); } else { subscription.request(1); } - writeInProgress = false; } } } @@ -194,4 +194,4 @@ public String toString() { return getClass() + ":" + path.toString(); } } -} +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseAsyncClientHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseAsyncClientHandler.java index 925d2750a1b4..2006288de580 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseAsyncClientHandler.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseAsyncClientHandler.java @@ -20,6 +20,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.function.Function; +import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.Response; import software.amazon.awssdk.core.SdkRequest; @@ -44,9 +45,11 @@ import software.amazon.awssdk.core.internal.http.async.AsyncStreamingResponseHandler; import software.amazon.awssdk.core.internal.http.async.CombinedResponseAsyncHttpResponseHandler; import software.amazon.awssdk.core.internal.util.ThrowableUtils; +import 
software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.CompletableFutureUtils; import software.amazon.awssdk.utils.Logger; @@ -69,21 +72,23 @@ protected BaseAsyncClientHandler(SdkClientConfiguration clientConfiguration, public CompletableFuture execute( ClientExecutionParams executionParams) { - validateExecutionParams(executionParams); - ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); - TransformingAsyncResponseHandler> combinedResponseHandler; - - /* Decorate and combine provided response handlers into a single decorated response handler */ - if (executionParams.getCombinedResponseHandler() == null) { - combinedResponseHandler = createDecoratedHandler(executionParams.getResponseHandler(), - executionParams.getErrorResponseHandler(), - executionContext); - } else { - combinedResponseHandler = createDecoratedHandler(executionParams.getCombinedResponseHandler(), - executionContext); - } + return measureApiCallSuccess(executionParams, () -> { + validateExecutionParams(executionParams); + ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); + TransformingAsyncResponseHandler> combinedResponseHandler; + + /* Decorate and combine provided response handlers into a single decorated response handler */ + if (executionParams.getCombinedResponseHandler() == null) { + combinedResponseHandler = createDecoratedHandler(executionParams.getResponseHandler(), + executionParams.getErrorResponseHandler(), + executionContext); + } else { + combinedResponseHandler = createDecoratedHandler(executionParams.getCombinedResponseHandler(), + executionContext); + } - return doExecute(executionParams, executionContext, 
combinedResponseHandler); + return doExecute(executionParams, executionContext, combinedResponseHandler); + }); } @Override @@ -91,46 +96,48 @@ public Complet ClientExecutionParams executionParams, AsyncResponseTransformer asyncResponseTransformer) { - validateExecutionParams(executionParams); + return measureApiCallSuccess(executionParams, () -> { + validateExecutionParams(executionParams); - if (executionParams.getCombinedResponseHandler() != null) { - // There is no support for catching errors in a body for streaming responses. Our codegen must never - // attempt to do this. - throw new IllegalArgumentException("A streaming 'asyncResponseTransformer' may not be used when a " - + "'combinedResponseHandler' has been specified in a " - + "ClientExecutionParams object."); - } + if (executionParams.getCombinedResponseHandler() != null) { + // There is no support for catching errors in a body for streaming responses. Our codegen must never + // attempt to do this. + throw new IllegalArgumentException("A streaming 'asyncResponseTransformer' may not be used when a " + + "'combinedResponseHandler' has been specified in a " + + "ClientExecutionParams object."); + } - ExecutionAttributes executionAttributes = createInitialExecutionAttributes(); + ExecutionAttributes executionAttributes = createInitialExecutionAttributes(); - AsyncStreamingResponseHandler asyncStreamingResponseHandler = - new AsyncStreamingResponseHandler<>(asyncResponseTransformer); + AsyncStreamingResponseHandler asyncStreamingResponseHandler = + new AsyncStreamingResponseHandler<>(asyncResponseTransformer); - // For streaming requests, prepare() should be called as early as possible to avoid NPE in client - // See https://github.com/aws/aws-sdk-java-v2/issues/1268. We do this with a wrapper that caches the prepare - // result until the execution attempt number changes. This guarantees that prepare is only called once per - // execution. 
- TransformingAsyncResponseHandler wrappedAsyncStreamingResponseHandler = - IdempotentAsyncResponseHandler.create( - asyncStreamingResponseHandler, - () -> executionAttributes.getAttribute(InternalCoreExecutionAttribute.EXECUTION_ATTEMPT), - Integer::equals); - wrappedAsyncStreamingResponseHandler.prepare(); + // For streaming requests, prepare() should be called as early as possible to avoid NPE in client + // See https://github.com/aws/aws-sdk-java-v2/issues/1268. We do this with a wrapper that caches the prepare + // result until the execution attempt number changes. This guarantees that prepare is only called once per + // execution. + TransformingAsyncResponseHandler wrappedAsyncStreamingResponseHandler = + IdempotentAsyncResponseHandler.create( + asyncStreamingResponseHandler, + () -> executionAttributes.getAttribute(InternalCoreExecutionAttribute.EXECUTION_ATTEMPT), + Integer::equals); + wrappedAsyncStreamingResponseHandler.prepare(); - ExecutionContext context = createExecutionContext(executionParams, executionAttributes); + ExecutionContext context = createExecutionContext(executionParams, executionAttributes); - HttpResponseHandler decoratedResponseHandlers = - decorateResponseHandlers(executionParams.getResponseHandler(), context); + HttpResponseHandler decoratedResponseHandlers = + decorateResponseHandlers(executionParams.getResponseHandler(), context); - asyncStreamingResponseHandler.responseHandler(decoratedResponseHandlers); + asyncStreamingResponseHandler.responseHandler(decoratedResponseHandlers); - TransformingAsyncResponseHandler errorHandler = - resolveErrorResponseHandler(executionParams.getErrorResponseHandler(), context, crc32Validator); + TransformingAsyncResponseHandler errorHandler = + resolveErrorResponseHandler(executionParams.getErrorResponseHandler(), context, crc32Validator); - TransformingAsyncResponseHandler> combinedResponseHandler = - new CombinedResponseAsyncHttpResponseHandler<>(wrappedAsyncStreamingResponseHandler, 
errorHandler); + TransformingAsyncResponseHandler> combinedResponseHandler = + new CombinedResponseAsyncHttpResponseHandler<>(wrappedAsyncStreamingResponseHandler, errorHandler); - return doExecute(executionParams, context, combinedResponseHandler); + return doExecute(executionParams, context, combinedResponseHandler); + }); } /** @@ -261,4 +268,28 @@ private CompletableFuture invoke( .executionContext(executionContext) .execute(responseHandler); } + + private CompletableFuture measureApiCallSuccess(ClientExecutionParams executionParams, + Supplier> apiCall) { + try { + CompletableFuture apiCallResult = apiCall.get(); + CompletableFuture outputFuture = + apiCallResult.whenComplete((r, t) -> reportApiCallSuccess(executionParams, t == null)); + + // Preserve cancellations on the output future, by passing cancellations of the output future to the api call future. + CompletableFutureUtils.forwardExceptionTo(outputFuture, apiCallResult); + + return outputFuture; + } catch (Exception e) { + reportApiCallSuccess(executionParams, false); + throw e; + } + } + + private void reportApiCallSuccess(ClientExecutionParams executionParams, boolean value) { + MetricCollector metricCollector = executionParams.getMetricCollector(); + if (metricCollector != null) { + metricCollector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, value); + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java index 2e3e2c773b10..f85574acca0c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseClientHandler.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.core.internal.handler; import java.net.URI; +import java.time.Duration; import java.util.function.BiFunction; import 
software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.Response; @@ -32,8 +33,12 @@ import software.amazon.awssdk.core.interceptor.InterceptorContext; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringUtils; @SdkInternalApi @@ -65,7 +70,14 @@ static InterceptorContext finalizeSdkHttpFu SdkClientConfiguration clientConfiguration) { runBeforeMarshallingInterceptors(executionContext); - SdkHttpFullRequest request = executionParams.getMarshaller().marshall(inputT); + + Pair measuredMarshall = MetricUtils.measureDuration(() -> + executionParams.getMarshaller().marshall(inputT)); + + executionContext.metricCollector().reportMetric(CoreMetric.MARSHALLING_DURATION, measuredMarshall.right()); + + SdkHttpFullRequest request = measuredMarshall.left(); + request = modifyEndpointHostIfNeeded(request, clientConfiguration, executionParams); addHttpRequest(executionContext, request); @@ -177,6 +189,8 @@ protected ExecutionCont ExecutionInterceptorChain interceptorChain = new ExecutionInterceptorChain(clientConfiguration.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + MetricCollector metricCollector = resolveMetricCollector(params); + return ExecutionContext.builder() .interceptorChain(interceptorChain) .interceptorContext(InterceptorContext.builder() @@ -184,6 +198,7 @@ protected ExecutionCont .build()) .executionAttributes(executionAttributes) .signer(clientConfiguration.option(SdkAdvancedClientOption.SIGNER)) + .metricCollector(metricCollector) .build(); } @@ -271,4 +286,12 @@ private 
static BiFunction composeResponseFunctions(BiFunction function2) { return (x, y) -> function2.apply(function1.apply(x, y), y); } + + private MetricCollector resolveMetricCollector(ClientExecutionParams params) { + MetricCollector metricCollector = params.getMetricCollector(); + if (metricCollector == null) { + metricCollector = MetricCollector.create("ApiCall"); + } + return metricCollector; + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseSyncClientHandler.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseSyncClientHandler.java index 33b868042b9d..e035e844a753 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseSyncClientHandler.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/handler/BaseSyncClientHandler.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.core.internal.handler; import java.util.Optional; +import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.Response; import software.amazon.awssdk.core.SdkRequest; @@ -33,11 +34,13 @@ import software.amazon.awssdk.core.internal.http.AmazonSyncHttpClient; import software.amazon.awssdk.core.internal.http.CombinedResponseHandler; import software.amazon.awssdk.core.internal.http.InterruptMonitor; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; @SdkInternalApi public abstract class BaseSyncClientHandler extends BaseClientHandler implements SyncClientHandler { @@ -56,49 +59,53 @@ public ReturnT ClientExecutionParams executionParams, ResponseTransformer 
responseTransformer) { - validateExecutionParams(executionParams); + return measureApiCallSuccess(executionParams, () -> { + validateExecutionParams(executionParams); - if (executionParams.getCombinedResponseHandler() != null) { - // There is no support for catching errors in a body for streaming responses - throw new IllegalArgumentException("A streaming 'responseTransformer' may not be used when a " - + "'combinedResponseHandler' has been specified in a " - + "ClientExecutionParams object."); - } + if (executionParams.getCombinedResponseHandler() != null) { + // There is no support for catching errors in a body for streaming responses + throw new IllegalArgumentException("A streaming 'responseTransformer' may not be used when a " + + "'combinedResponseHandler' has been specified in a " + + "ClientExecutionParams object."); + } - ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); + ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); - HttpResponseHandler decoratedResponseHandlers = - decorateResponseHandlers(executionParams.getResponseHandler(), executionContext); + HttpResponseHandler decoratedResponseHandlers = + decorateResponseHandlers(executionParams.getResponseHandler(), executionContext); - HttpResponseHandler httpResponseHandler = - new HttpResponseHandlerAdapter<>(decoratedResponseHandlers, responseTransformer); + HttpResponseHandler httpResponseHandler = + new HttpResponseHandlerAdapter<>(decoratedResponseHandlers, responseTransformer); - return doExecute( - executionParams, - executionContext, - new CombinedResponseHandler<>(httpResponseHandler, executionParams.getErrorResponseHandler())); + return doExecute( + executionParams, + executionContext, + new CombinedResponseHandler<>(httpResponseHandler, executionParams.getErrorResponseHandler())); + }); } @Override public OutputT execute( ClientExecutionParams executionParams) { - 
validateExecutionParams(executionParams); - ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); - HttpResponseHandler> combinedResponseHandler; + return measureApiCallSuccess(executionParams, () -> { + validateExecutionParams(executionParams); + ExecutionContext executionContext = createExecutionContext(executionParams, createInitialExecutionAttributes()); + HttpResponseHandler> combinedResponseHandler; - if (executionParams.getCombinedResponseHandler() != null) { - combinedResponseHandler = decorateSuccessResponseHandlers(executionParams.getCombinedResponseHandler(), - executionContext); - } else { - HttpResponseHandler decoratedResponseHandlers = - decorateResponseHandlers(executionParams.getResponseHandler(), executionContext); + if (executionParams.getCombinedResponseHandler() != null) { + combinedResponseHandler = decorateSuccessResponseHandlers(executionParams.getCombinedResponseHandler(), + executionContext); + } else { + HttpResponseHandler decoratedResponseHandlers = + decorateResponseHandlers(executionParams.getResponseHandler(), executionContext); - combinedResponseHandler = new CombinedResponseHandler<>(decoratedResponseHandlers, - executionParams.getErrorResponseHandler()); - } + combinedResponseHandler = new CombinedResponseHandler<>(decoratedResponseHandlers, + executionParams.getErrorResponseHandler()); + } - return doExecute(executionParams, executionContext, combinedResponseHandler); + return doExecute(executionParams, executionContext, combinedResponseHandler); + }); } @Override @@ -150,6 +157,24 @@ private ReturnT doExecute( responseHandler); } + private T measureApiCallSuccess(ClientExecutionParams executionParams, Supplier thingToMeasureSuccessOf) { + try { + T result = thingToMeasureSuccessOf.get(); + reportApiCallSuccess(executionParams, true); + return result; + } catch (Exception e) { + reportApiCallSuccess(executionParams, false); + throw e; + } + } + + private void 
reportApiCallSuccess(ClientExecutionParams executionParams, boolean value) { + MetricCollector metricCollector = executionParams.getMetricCollector(); + if (metricCollector != null) { + metricCollector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, value); + } + } + private static class HttpResponseHandlerAdapter implements HttpResponseHandler { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index d1784ebf9058..d835d5b4a0c0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -30,6 +30,8 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.AfterExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyUserAgentStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncApiCallAttemptMetricCollectionStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncApiCallMetricCollectionStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncApiCallTimeoutTrackingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; @@ -172,11 +174,13 @@ public CompletableFuture execute( .first(SigningStage::new) .then(BeforeTransmissionExecutionInterceptorsStage::new) .then(d -> new MakeAsyncHttpRequestStage<>(responseHandler, d)) + .wrappedWith(AsyncApiCallAttemptMetricCollectionStage::new) .wrappedWith((deps, wrapped) -> new AsyncRetryableStage<>(responseHandler, deps, wrapped)) .then(async(() -> new 
UnwrapResponseContainer<>())) .then(async(() -> new AfterExecutionInterceptorsStage<>())) .wrappedWith(AsyncExecutionFailureExceptionReportingStage::new) - .wrappedWith(AsyncApiCallTimeoutTrackingStage::new)::build)::build) + .wrappedWith(AsyncApiCallTimeoutTrackingStage::new) + .wrappedWith(AsyncApiCallMetricCollectionStage::new)::build)::build) .build(httpClientDependencies) .execute(request, createRequestExecutionDependencies()); } catch (RuntimeException e) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index 38feba879847..91e24c798f31 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -27,7 +27,9 @@ import software.amazon.awssdk.core.internal.http.pipeline.RequestPipelineBuilder; import software.amazon.awssdk.core.internal.http.pipeline.stages.AfterExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AfterTransmissionExecutionInterceptorsStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.ApiCallAttemptMetricCollectionStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApiCallAttemptTimeoutTrackingStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.ApiCallMetricCollectionStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApiCallTimeoutTrackingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyUserAgentStage; @@ -179,9 +181,11 @@ public OutputT execute(HttpResponseHandler> response .then(() -> new HandleResponseStage<>(responseHandler)) .wrappedWith(ApiCallAttemptTimeoutTrackingStage::new) 
.wrappedWith(TimeoutExceptionHandlingStage::new) + .wrappedWith((deps, wrapped) -> new ApiCallAttemptMetricCollectionStage<>(wrapped)) .wrappedWith(RetryableStage::new)::build) .wrappedWith(StreamManagingStage::new) .wrappedWith(ApiCallTimeoutTrackingStage::new)::build) + .wrappedWith((deps, wrapped) -> new ApiCallMetricCollectionStage<>(wrapped)) .then(() -> new UnwrapResponseContainer<>()) .then(() -> new AfterExecutionInterceptorsStage<>()) .wrappedWith(ExecutionFailureExceptionReportingStage::new) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/RequestExecutionContext.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/RequestExecutionContext.java index b84afb9512dc..73d9f2ea94f4 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/RequestExecutionContext.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/RequestExecutionContext.java @@ -23,9 +23,11 @@ import software.amazon.awssdk.core.http.ExecutionContext; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptorChain; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import software.amazon.awssdk.core.internal.http.timers.TimeoutTracker; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.Validate; /** @@ -41,6 +43,7 @@ public final class RequestExecutionContext { private final ExecutionContext executionContext; private TimeoutTracker apiCallTimeoutTracker; private TimeoutTracker apiCallAttemptTimeoutTracker; + private MetricCollector attemptMetricCollector; private RequestExecutionContext(Builder builder) { this.requestProvider = builder.requestProvider; @@ -115,6 +118,15 @@ public void apiCallAttemptTimeoutTracker(TimeoutTracker 
timeoutTracker) { this.apiCallAttemptTimeoutTracker = timeoutTracker; } + public MetricCollector attemptMetricCollector() { + return attemptMetricCollector; + } + + public void attemptMetricCollector(MetricCollector metricCollector) { + executionAttributes().putAttribute(SdkExecutionAttribute.API_CALL_ATTEMPT_METRIC_COLLECTOR, metricCollector); + this.attemptMetricCollector = metricCollector; + } + /** * Sets the request body provider. * Used for transforming the original body provider to sign events for diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java new file mode 100644 index 000000000000..329b302ccba2 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallAttemptMetricCollectionStage.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static software.amazon.awssdk.core.internal.util.MetricUtils.collectHttpMetrics; +import static software.amazon.awssdk.core.internal.util.MetricUtils.createAttemptMetricsCollector; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; +import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; + +/** + * Wrapper pipeline that initializes and tracks the API call attempt metric collection. This wrapper and any wrapped + * stages will track API call attempt metrics. 
+ */ +@SdkInternalApi +public final class ApiCallAttemptMetricCollectionStage implements RequestToResponsePipeline { + private final RequestPipeline> wrapped; + + public ApiCallAttemptMetricCollectionStage(RequestPipeline> wrapped) { + this.wrapped = wrapped; + } + + @Override + public Response execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { + MetricCollector apiCallAttemptMetrics = createAttemptMetricsCollector(context); + context.attemptMetricCollector(apiCallAttemptMetrics); + reportBackoffDelay(context); + + Response response = wrapped.execute(input, context); + + collectHttpMetrics(apiCallAttemptMetrics, response.httpResponse()); + + return response; + } + + private void reportBackoffDelay(RequestExecutionContext context) { + Duration lastBackoffDelay = context.executionAttributes().getAttribute(RetryableStageHelper.LAST_BACKOFF_DELAY_DURATION); + if (lastBackoffDelay != null) { + context.attemptMetricCollector().reportMetric(CoreMetric.BACKOFF_DELAY_DURATION, lastBackoffDelay); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java new file mode 100644 index 000000000000..a54fd9678376 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; + +/** + * Wrapper pipeline that tracks the {@link CoreMetric#API_CALL_DURATION} metric. + */ +@SdkInternalApi +public class ApiCallMetricCollectionStage implements RequestToResponsePipeline { + private final RequestPipeline> wrapped; + + public ApiCallMetricCollectionStage(RequestPipeline> wrapped) { + this.wrapped = wrapped; + } + + @Override + public Response execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { + MetricCollector metricCollector = context.executionContext().metricCollector(); + + // Note: at this point, any exception, even a service exception, will + // be thrown from the wrapped pipeline so we can't use + // MetricUtil.measureDuration() + long callStart = System.nanoTime(); + try { + return wrapped.execute(input, context); + } finally { + long d = System.nanoTime() - callStart; + metricCollector.reportMetric(CoreMetric.API_CALL_DURATION, Duration.ofNanos(d)); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java new file mode 100644 index 000000000000..ddb1c66643e2 --- 
/dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallAttemptMetricCollectionStage.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static software.amazon.awssdk.core.internal.util.MetricUtils.collectHttpMetrics; +import static software.amazon.awssdk.core.internal.util.MetricUtils.createAttemptMetricsCollector; + +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.http.pipeline.stages.utils.RetryableStageHelper; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; + +/** + * Wrapper pipeline that initializes and tracks the API call attempt metric collection. This wrapper and any wrapped + * stages will track API call attempt metrics. 
+ */ +@SdkInternalApi +public final class AsyncApiCallAttemptMetricCollectionStage implements RequestPipeline>> { + private final RequestPipeline>> wrapped; + + public AsyncApiCallAttemptMetricCollectionStage(RequestPipeline>> wrapped) { + this.wrapped = wrapped; + } + + @Override + public CompletableFuture> execute(SdkHttpFullRequest input, + RequestExecutionContext context) throws Exception { + + MetricCollector apiCallAttemptMetrics = createAttemptMetricsCollector(context); + context.attemptMetricCollector(apiCallAttemptMetrics); + reportBackoffDelay(context); + + CompletableFuture> executeFuture = wrapped.execute(input, context); + + executeFuture.whenComplete((r, t) -> { + if (t == null) { + collectHttpMetrics(apiCallAttemptMetrics, r.httpResponse()); + } + }); + + return executeFuture; + } + + private void reportBackoffDelay(RequestExecutionContext context) { + Duration lastBackoffDelay = context.executionAttributes().getAttribute(RetryableStageHelper.LAST_BACKOFF_DELAY_DURATION); + if (lastBackoffDelay != null) { + context.attemptMetricCollector().reportMetric(CoreMetric.BACKOFF_DELAY_DURATION, lastBackoffDelay); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java new file mode 100644 index 000000000000..3d57cedea52d --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +/** + * Wrapper pipeline that tracks the {@link CoreMetric#API_CALL_DURATION} metric. + */ +@SdkInternalApi +public final class AsyncApiCallMetricCollectionStage implements RequestPipeline> { + private final RequestPipeline> wrapped; + + public AsyncApiCallMetricCollectionStage(RequestPipeline> wrapped) { + this.wrapped = wrapped; + } + + @Override + public CompletableFuture execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { + MetricCollector metricCollector = context.executionContext().metricCollector(); + + CompletableFuture future = new CompletableFuture<>(); + + long callStart = System.nanoTime(); + CompletableFuture executeFuture = wrapped.execute(input, context); + + executeFuture.whenComplete((r, t) -> { + long duration = System.nanoTime() - callStart; + metricCollector.reportMetric(CoreMetric.API_CALL_DURATION, Duration.ofNanos(duration)); + + if (t != null) { + future.completeExceptionally(t); + } else { + future.complete(r); + } + }); + + return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); + } +} diff --git 
a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java index de964572c3ea..31a38f1f294d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStage.java @@ -43,12 +43,16 @@ import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import software.amazon.awssdk.core.internal.http.timers.TimeoutTracker; import software.amazon.awssdk.core.internal.http.timers.TimerUtils; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkHttpContentPublisher; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.CompletableFutureUtils; import software.amazon.awssdk.utils.Logger; /** @@ -141,14 +145,17 @@ private CompletableFuture> executeHttpRequest(SdkHttpFullReque // Set content length if it hasn't been set already. 
SdkHttpFullRequest requestWithContentLength = getRequestWithContentLength(request, requestProvider); + MetricCollector httpMetricCollector = MetricUtils.createHttpMetricsCollector(context); + AsyncExecuteRequest executeRequest = AsyncExecuteRequest.builder() .request(requestWithContentLength) .requestContentPublisher(requestProvider) .responseHandler(wrappedResponseHandler) .fullDuplex(isFullDuplex(context.executionAttributes())) + .metricCollector(httpMetricCollector) .build(); - CompletableFuture httpClientFuture = sdkAsyncHttpClient.execute(executeRequest); + CompletableFuture httpClientFuture = doExecuteHttpRequest(context, executeRequest); TimeoutTracker timeoutTracker = setupAttemptTimer(responseFuture, context); context.apiCallAttemptTimeoutTracker(timeoutTracker); @@ -173,6 +180,23 @@ private CompletableFuture> executeHttpRequest(SdkHttpFullReque return responseFuture; } + private CompletableFuture doExecuteHttpRequest(RequestExecutionContext context, AsyncExecuteRequest executeRequest) { + MetricCollector metricCollector = context.attemptMetricCollector(); + long callStart = System.nanoTime(); + CompletableFuture httpClientFuture = sdkAsyncHttpClient.execute(executeRequest); + + // Offload the metrics reporting from this stage onto the future completion executor + CompletableFuture result = httpClientFuture.whenComplete((r, t) -> { + long duration = System.nanoTime() - callStart; + metricCollector.reportMetric(CoreMetric.SERVICE_CALL_DURATION, Duration.ofNanos(duration)); + }); + + // Make sure failures on the result future are forwarded to the http client future. 
+ CompletableFutureUtils.forwardExceptionTo(result, httpClientFuture); + + return result; + } + private boolean isFullDuplex(ExecutionAttributes executionAttributes) { return executionAttributes.getAttribute(SdkInternalExecutionAttribute.IS_FULL_DUPLEX) != null && executionAttributes.getAttribute(SdkInternalExecutionAttribute.IS_FULL_DUPLEX); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStage.java index 05dbf2026d66..d93bc087c079 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStage.java @@ -15,18 +15,22 @@ package software.amazon.awssdk.core.internal.http.pipeline.stages; +import java.time.Duration; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.InterruptMonitor; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; import software.amazon.awssdk.http.HttpExecuteResponse; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.Pair; /** @@ -55,14 +59,25 @@ public Pair execute(SdkHttpFullRequest } 
private HttpExecuteResponse executeHttpRequest(SdkHttpFullRequest request, RequestExecutionContext context) throws Exception { + MetricCollector attemptMetricCollector = context.attemptMetricCollector(); + + MetricCollector httpMetricCollector = MetricUtils.createHttpMetricsCollector(context); + ExecutableHttpRequest requestCallable = sdkHttpClient .prepareRequest(HttpExecuteRequest.builder() .request(request) + .metricCollector(httpMetricCollector) .contentStreamProvider(request.contentStreamProvider().orElse(null)) .build()); context.apiCallTimeoutTracker().abortable(requestCallable); context.apiCallAttemptTimeoutTracker().abortable(requestCallable); - return requestCallable.call(); + + Pair measuredExecute = MetricUtils.measureDurationUnsafe(requestCallable); + + attemptMetricCollector.reportMetric(CoreMetric.SERVICE_CALL_DURATION, measuredExecute.right()); + + return measuredExecute.left(); } + } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/SigningStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/SigningStage.java index f436cbb91139..4c7479238a88 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/SigningStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/SigningStage.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.internal.http.pipeline.stages; +import java.time.Duration; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.http.ExecutionContext; @@ -24,9 +25,13 @@ import software.amazon.awssdk.core.internal.http.InterruptMonitor; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestToRequestPipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import 
software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.signer.AsyncRequestBodySigner; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Pair; /** * Sign the marshalled request (if applicable). @@ -52,15 +57,21 @@ public SdkHttpFullRequest execute(SdkHttpFullRequest request, RequestExecutionCo /** * Sign the request if the signer if provided and credentials are present. */ - private SdkHttpFullRequest signRequest(SdkHttpFullRequest request, RequestExecutionContext context) { + private SdkHttpFullRequest signRequest(SdkHttpFullRequest request, RequestExecutionContext context) throws Exception { updateInterceptorContext(request, context.executionContext()); Signer signer = context.signer(); + MetricCollector metricCollector = context.attemptMetricCollector(); if (shouldSign(signer)) { adjustForClockSkew(context.executionAttributes()); - SdkHttpFullRequest signedRequest = signer.sign(request, context.executionAttributes()); + Pair measuredSign = MetricUtils.measureDuration(() -> + signer.sign(request, context.executionAttributes())); + + metricCollector.reportMetric(CoreMetric.SIGNING_DURATION, measuredSign.right()); + + SdkHttpFullRequest signedRequest = measuredSign.left(); if (signer instanceof AsyncRequestBodySigner) { //Transform request body provider with signing operator diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper.java index 8ad4dc3df0b3..e16032fd1278 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/utils/RetryableStageHelper.java @@ -26,12 +26,14 @@ import software.amazon.awssdk.core.exception.NonRetryableException; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.RetryableStage; import software.amazon.awssdk.core.internal.retry.ClockSkewAdjuster; +import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.retry.RetryPolicyContext; import software.amazon.awssdk.core.retry.RetryUtils; @@ -45,6 +47,9 @@ */ @SdkInternalApi public class RetryableStageHelper { + public static final ExecutionAttribute LAST_BACKOFF_DELAY_DURATION = + new ExecutionAttribute<>("LastBackoffDuration"); + private final SdkHttpFullRequest request; private final RequestExecutionContext context; private final RetryPolicy retryPolicy; @@ -53,7 +58,6 @@ public class RetryableStageHelper { private int attemptNumber = 0; private SdkHttpResponse lastResponse = null; private SdkException lastException = null; - private Duration lastBackoffDelay = null; public RetryableStageHelper(SdkHttpFullRequest request, RequestExecutionContext context, @@ -99,6 +103,7 @@ public boolean retryPolicyAllowsRetry() { * Return the exception that should be thrown, because the retry policy did not allow the request to be retried. 
*/ public SdkException retryPolicyDisallowedRetryException() { + context.executionContext().metricCollector().reportMetric(CoreMetric.RETRY_COUNT, retriesAttemptedSoFar(true)); return lastException; } @@ -118,7 +123,7 @@ public Duration getBackoffDelay() { result = retryPolicy.backoffStrategy().computeDelayBeforeNextRetry(context); } } - lastBackoffDelay = result; + context.executionAttributes().putAttribute(LAST_BACKOFF_DELAY_DURATION, result); return result; } @@ -128,7 +133,7 @@ public Duration getBackoffDelay() { public void logBackingOff(Duration backoffDelay) { SdkStandardLogger.REQUEST_LOGGER.debug(() -> "Retryable error detected. Will retry in " + backoffDelay.toMillis() + "ms. Request attempt number " + - attemptNumber); + attemptNumber, lastException); } /** @@ -138,13 +143,11 @@ public SdkHttpFullRequest requestToSend() { Integer availableRetryCapacity = TokenBucketRetryCondition.getCapacityForExecution(context.executionAttributes()) .map(TokenBucketRetryCondition.Capacity::capacityRemaining) .orElse(null); - + String headerValue = (attemptNumber - 1) + "/" + + context.executionAttributes().getAttribute(LAST_BACKOFF_DELAY_DURATION).toMillis() + "/" + + (availableRetryCapacity != null ? availableRetryCapacity : ""); return request.toBuilder() - .putHeader(SDK_RETRY_INFO_HEADER, - String.format("%s/%s/%s", - attemptNumber - 1, - lastBackoffDelay.toMillis(), - availableRetryCapacity != null ? 
availableRetryCapacity : "")) + .putHeader(SDK_RETRY_INFO_HEADER, headerValue) .build(); } @@ -171,6 +174,7 @@ public void adjustClockIfClockSkew(Response response) { */ public void attemptSucceeded() { retryPolicy.aggregateRetryCondition().requestSucceeded(retryPolicyContext(false)); + context.executionContext().metricCollector().reportMetric(CoreMetric.RETRY_COUNT, retriesAttemptedSoFar(false)); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java new file mode 100644 index 000000000000..0e26fb1c53c0 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java @@ -0,0 +1,92 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.util; + +import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS; +import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZ_ID_2_HEADER; + +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.utils.Pair; +import software.amazon.awssdk.utils.http.SdkHttpUtils; + +/** + * Utility methods for working with metrics. + */ +@SdkInternalApi +public final class MetricUtils { + + private MetricUtils() { + } + + /** + * Measure the duration of the given callable. + * + * @param c The callable to measure. + * @return A {@code Pair} containing the result of {@code c} and the duration. + */ + public static Pair measureDuration(Supplier c) { + long start = System.nanoTime(); + T result = c.get(); + Duration d = Duration.ofNanos(System.nanoTime() - start); + return Pair.of(result, d); + } + + /** + * Measure the duration of the given callable. + * + * @param c The callable to measure. + * @return A {@code Pair} containing the result of {@code c} and the duration. 
+ */ + public static Pair measureDurationUnsafe(Callable c) throws Exception { + long start = System.nanoTime(); + T result = c.call(); + Duration d = Duration.ofNanos(System.nanoTime() - start); + return Pair.of(result, d); + } + + public static void collectHttpMetrics(MetricCollector metricCollector, SdkHttpFullResponse httpResponse) { + metricCollector.reportMetric(HttpMetric.HTTP_STATUS_CODE, httpResponse.statusCode()); + SdkHttpUtils.allMatchingHeadersFromCollection(httpResponse.headers(), X_AMZN_REQUEST_ID_HEADERS) + .forEach(v -> metricCollector.reportMetric(CoreMetric.AWS_REQUEST_ID, v)); + httpResponse.firstMatchingHeader(X_AMZ_ID_2_HEADER) + .ifPresent(v -> metricCollector.reportMetric(CoreMetric.AWS_EXTENDED_REQUEST_ID, v)); + } + + public static MetricCollector createAttemptMetricsCollector(RequestExecutionContext context) { + MetricCollector parentCollector = context.executionContext().metricCollector(); + if (parentCollector != null) { + return parentCollector.createChild("ApiCallAttempt"); + } + return NoOpMetricCollector.create(); + } + + public static MetricCollector createHttpMetricsCollector(RequestExecutionContext context) { + MetricCollector parentCollector = context.attemptMetricCollector(); + if (parentCollector != null) { + return parentCollector.createChild("HttpClient"); + } + return NoOpMetricCollector.create(); + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/Mimetype.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/Mimetype.java index 887e21182aee..8c50fa7df290 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/Mimetype.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/Mimetype.java @@ -116,10 +116,6 @@ public static Mimetype getInstance() { * to find the corresponding mime type. 
If the file has no extension, or the extension is not * available in the listing contained in this class, the default mimetype * application/octet-stream is returned. - *

- * A file extension is one or more characters that occur after the last period (.) in the file's name. - * If a file has no extension, - * Guesses the mimetype of file data based on the file's extension. * * @param path the file whose extension may match a known mimetype. * @return the file's mimetype based on its extension, or a default value of @@ -132,7 +128,7 @@ public String getMimetype(Path path) { if (file != null) { return getMimetype(file.toString()); } - return null; + return MIMETYPE_OCTET_STREAM; } /** @@ -140,10 +136,6 @@ public String getMimetype(Path path) { * to find the corresponding mime type. If the file has no extension, or the extension is not * available in the listing contained in this class, the default mimetype * application/octet-stream is returned. - *

- * A file extension is one or more characters that occur after the last period (.) in the file's name. - * If a file has no extension, - * Guesses the mimetype of file data based on the file's extension. * * @param file the file whose extension may match a known mimetype. * @return the file's mimetype based on its extension, or a default value of @@ -159,10 +151,6 @@ public String getMimetype(File file) { * no extension, or the extension is not available in the listing contained * in this class, the default mimetype application/octet-stream * is returned. - *

- * A file extension is one or more characters that occur after the last - * period (.) in the file's name. If a file has no extension, Guesses the - * mimetype of file data based on the file's extension. * * @param fileName The name of the file whose extension may match a known * mimetype. diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java new file mode 100644 index 000000000000..90b208de9188 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java @@ -0,0 +1,127 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.metrics; + +import java.time.Duration; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +/** + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public final class CoreMetric { + /** + * The unique ID for the service. This is present for all API call metrics. 
+ */ + public static final SdkMetric SERVICE_ID = + metric("ServiceId", String.class, MetricLevel.ERROR); + + /** + * The name of the service operation being invoked. This is present for all + * API call metrics. + */ + public static final SdkMetric OPERATION_NAME = + metric("OperationName", String.class, MetricLevel.ERROR); + + /** + * True if the API call succeeded, false otherwise. + */ + public static final SdkMetric API_CALL_SUCCESSFUL = + metric("ApiCallSuccessful", Boolean.class, MetricLevel.ERROR); + + /** + * The number of retries that the SDK performed in the execution of the request. 0 implies that the request worked the first + * time, and no retries were attempted. + */ + public static final SdkMetric RETRY_COUNT = + metric("RetryCount", Integer.class, MetricLevel.ERROR); + + /** + * The duration of the API call. This includes all call attempts made. + * + *

{@code API_CALL_DURATION ~= CREDENTIALS_FETCH_DURATION + MARSHALLING_DURATION + SUM_ALL(BACKOFF_DELAY_DURATION) + + * SUM_ALL(SIGNING_DURATION) + SUM_ALL(SERVICE_CALL_DURATION) + SUM_ALL(UNMARSHALLING_DURATION)} + */ + public static final SdkMetric API_CALL_DURATION = + metric("ApiCallDuration", Duration.class, MetricLevel.INFO); + + /** + * The duration of time taken to fetch signing credentials for the API call. + */ + public static final SdkMetric CREDENTIALS_FETCH_DURATION = + metric("CredentialsFetchDuration", Duration.class, MetricLevel.INFO); + + + /** + * The duration of time that the SDK has waited before this API call attempt, based on the + * {@link RetryPolicy#backoffStrategy()}. + */ + public static final SdkMetric BACKOFF_DELAY_DURATION = + metric("BackoffDelayDuration", Duration.class, MetricLevel.INFO); + + /** + * The duration of time taken to marshall the SDK request to an HTTP request. + */ + public static final SdkMetric MARSHALLING_DURATION = + metric("MarshallingDuration", Duration.class, MetricLevel.INFO); + + /** + * The duration of time taken to sign the HTTP request. + */ + public static final SdkMetric SIGNING_DURATION = + metric("SigningDuration", Duration.class, MetricLevel.INFO); + + /** + * The duration of time taken to connect to the service (or acquire a connection from the connection pool), send the + * serialized request and receive the initial response (e.g. HTTP status code and headers). This DOES NOT include the time + * taken to read the entire response from the service. + */ + public static final SdkMetric SERVICE_CALL_DURATION = + metric("ServiceCallDuration", Duration.class, MetricLevel.INFO); + + /** + * The duration of time taken to unmarshall the HTTP response to an SDK response. + * + *

Note: For streaming operations, this does not include the time to read the response payload. + */ + public static final SdkMetric UNMARSHALLING_DURATION = + metric("UnmarshallingDuration", Duration.class, MetricLevel.INFO); + + /** + * The request ID of the service request. + */ + public static final SdkMetric AWS_REQUEST_ID = + metric("AwsRequestId", String.class, MetricLevel.INFO); + + /** + * The extended request ID of the service request. + */ + public static final SdkMetric AWS_EXTENDED_REQUEST_ID = + metric("AwsExtendedRequestId", String.class, MetricLevel.INFO); + + private CoreMetric() { + } + + private static SdkMetric metric(String name, Class clzz, MetricLevel level) { + return SdkMetric.create(name, clzz, level, MetricCategory.CORE); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestOverrideConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestOverrideConfigurationTest.java index 4ae3e660a470..dd275b54f304 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestOverrideConfigurationTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestOverrideConfigurationTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; import java.util.ArrayList; import java.util.Arrays; @@ -25,6 +26,7 @@ import java.util.List; import java.util.Map; import org.junit.Test; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.utils.ImmutableMap; public class RequestOverrideConfigurationTest { @@ -93,4 +95,74 @@ public void shouldGuaranteeImmutability() { assertThat(configurationBuilder.headers().size()).isEqualTo(1); assertThat(configurationBuilder.headers().get("foo")).containsExactly("bar"); } + + @Test + public void metricPublishers_createsCopy() { + List publishers = new ArrayList<>(); + publishers.add(mock(MetricPublisher.class)); + List toModify = new 
ArrayList<>(publishers); + + SdkRequestOverrideConfiguration overrideConfig = SdkRequestOverrideConfiguration.builder() + .metricPublishers(toModify) + .build(); + + toModify.clear(); + + assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void addMetricPublisher_maintainsAllAdded() { + List publishers = new ArrayList<>(); + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + SdkRequestOverrideConfiguration.Builder builder = SdkRequestOverrideConfiguration.builder(); + + publishers.forEach(builder::addMetricPublisher); + + SdkRequestOverrideConfiguration overrideConfig = builder.build(); + + assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void metricPublishers_overwritesPreviouslyAdded() { + MetricPublisher firstAdded = mock(MetricPublisher.class); + + List publishers = new ArrayList<>(); + + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + SdkRequestOverrideConfiguration.Builder builder = SdkRequestOverrideConfiguration.builder(); + + builder.addMetricPublisher(firstAdded); + + builder.metricPublishers(publishers); + + SdkRequestOverrideConfiguration overrideConfig = builder.build(); + + assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void addMetricPublisher_listPreviouslyAdded_appendedToList() { + List publishers = new ArrayList<>(); + + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + MetricPublisher thirdAdded = mock(MetricPublisher.class); + + SdkRequestOverrideConfiguration.Builder builder = SdkRequestOverrideConfiguration.builder(); + + builder.metricPublishers(publishers); + builder.addMetricPublisher(thirdAdded); + + SdkRequestOverrideConfiguration overrideConfig = builder.build(); + + 
assertThat(overrideConfig.metricPublishers()).containsExactly(publishers.get(0), publishers.get(1), thirdAdded); + } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java index e2da5dc9beaa..df52434beafa 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java @@ -19,6 +19,7 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; +import io.reactivex.Flowable; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -31,6 +32,7 @@ import org.junit.runners.Parameterized; import org.reactivestreams.Subscriber; import software.amazon.awssdk.http.async.SimpleSubscriber; +import software.amazon.awssdk.utils.BinaryUtils; @RunWith(Parameterized.class) public class AsyncRequestBodyTest { @@ -93,4 +95,17 @@ public void onComplete() { done.await(); assertThat(sb.toString()).isEqualTo(testString); } + + @Test + public void fromBytes_byteArrayNotNull_createsCopy() { + byte[] original = {0x1, 0x2, 0x3, 0x4}; + byte[] toModify = new byte[original.length]; + System.arraycopy(original, 0, toModify, 0, original.length); + AsyncRequestBody body = AsyncRequestBody.fromBytes(toModify); + for (int i = 0; i < toModify.length; ++i) { + toModify[i]++; + } + ByteBuffer publishedBb = Flowable.fromPublisher(body).toList().blockingGet().get(0); + assertThat(BinaryUtils.copyAllBytesFrom(publishedBb)).isEqualTo(original); + } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/config/ClientOverrideConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/config/ClientOverrideConfigurationTest.java index a24136c60460..f4a0e1da7701 100644 --- 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/config/ClientOverrideConfigurationTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/config/ClientOverrideConfigurationTest.java @@ -18,6 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; import java.util.ArrayList; import java.util.Arrays; @@ -28,6 +29,7 @@ import org.junit.Test; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.internal.http.request.SlowExecutionInterceptor; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.utils.ImmutableMap; public class ClientOverrideConfigurationTest { @@ -108,4 +110,74 @@ public void shouldGuaranteeImmutability() { assertThat(configurationBuilder.headers().get("foo")).containsExactly("bar"); assertThat(configurationBuilder.executionInterceptors()).containsExactly(slowExecutionInterceptor); } + + @Test + public void metricPublishers_createsCopy() { + List publishers = new ArrayList<>(); + publishers.add(mock(MetricPublisher.class)); + List toModify = new ArrayList<>(publishers); + + ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() + .metricPublishers(toModify) + .build(); + + toModify.clear(); + + assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void addMetricPublisher_maintainsAllAdded() { + List publishers = new ArrayList<>(); + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + ClientOverrideConfiguration.Builder builder = ClientOverrideConfiguration.builder(); + + publishers.forEach(builder::addMetricPublisher); + + ClientOverrideConfiguration overrideConfig = builder.build(); + + 
assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void metricPublishers_overwritesPreviouslyAdded() { + MetricPublisher firstAdded = mock(MetricPublisher.class); + + List publishers = new ArrayList<>(); + + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + ClientOverrideConfiguration.Builder builder = ClientOverrideConfiguration.builder(); + + builder.addMetricPublisher(firstAdded); + + builder.metricPublishers(publishers); + + ClientOverrideConfiguration overrideConfig = builder.build(); + + assertThat(overrideConfig.metricPublishers()).isEqualTo(publishers); + } + + @Test + public void addMetricPublisher_listPreviouslyAdded_appendedToList() { + List publishers = new ArrayList<>(); + + publishers.add(mock(MetricPublisher.class)); + publishers.add(mock(MetricPublisher.class)); + + MetricPublisher thirdAdded = mock(MetricPublisher.class); + + ClientOverrideConfiguration.Builder builder = ClientOverrideConfiguration.builder(); + + builder.metricPublishers(publishers); + builder.addMetricPublisher(thirdAdded); + + ClientOverrideConfiguration overrideConfig = builder.build(); + + assertThat(overrideConfig.metricPublishers()).containsExactly(publishers.get(0), publishers.get(1), thirdAdded); + } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransfomerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransfomerTest.java deleted file mode 100644 index e2630b4ed7c5..000000000000 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransfomerTest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.internal.async; - -import static org.assertj.core.api.Assertions.assertThat; -import com.google.common.jimfs.Jimfs; -import java.io.IOException; -import java.nio.file.FileSystem; -import java.nio.file.Path; -import java.util.concurrent.CompletableFuture; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.reactivestreams.Subscription; - -/** - * Tests for {@link FileAsyncResponseTransformer}. - */ -public class FileAsyncResponseTransfomerTest { - private static FileSystem testFs; - - @BeforeClass - public static void setup() { - testFs = Jimfs.newFileSystem(); - } - - @AfterClass - public static void teardown() throws IOException { - testFs.close(); - } - - @Test - public void errorInStream_completesFuture() { - Path testPath = testFs.getPath("test_file.txt"); - FileAsyncResponseTransformer xformer = new FileAsyncResponseTransformer(testPath); - - CompletableFuture prepareFuture = xformer.prepare(); - - xformer.onResponse(new Object()); - xformer.onStream(subscriber -> { - subscriber.onSubscribe(new Subscription() { - @Override - public void request(long l) { - } - - @Override - public void cancel() { - } - }); - - subscriber.onError(new RuntimeException("Something went wrong")); - }); - - assertThat(prepareFuture.isCompletedExceptionally()).isTrue(); - } -} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformerTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformerTest.java new file mode 100644 index 
000000000000..015ecbdcca9e --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformerTest.java @@ -0,0 +1,131 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.jimfs.Jimfs; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.FileSystem; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.SdkPublisher; + +/** + * Tests for {@link FileAsyncResponseTransformer}. 
+ */ +public class FileAsyncResponseTransformerTest { + private static FileSystem testFs; + + @BeforeClass + public static void setup() { + testFs = Jimfs.newFileSystem(); + } + + @AfterClass + public static void teardown() throws IOException { + testFs.close(); + } + + @Test + public void errorInStream_completesFuture() { + Path testPath = testFs.getPath("test_file.txt"); + FileAsyncResponseTransformer xformer = new FileAsyncResponseTransformer(testPath); + + CompletableFuture prepareFuture = xformer.prepare(); + + xformer.onResponse(new Object()); + xformer.onStream(subscriber -> { + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long l) { + } + + @Override + public void cancel() { + } + }); + + subscriber.onError(new RuntimeException("Something went wrong")); + }); + + assertThat(prepareFuture.isCompletedExceptionally()).isTrue(); + } + + @Test + public void synchronousPublisher_shouldNotHang() throws Exception { + List futures = new ArrayList<>(); + + for (int i = 0; i < 10; i++) { + Path testPath = testFs.getPath(i + "test_file.txt"); + FileAsyncResponseTransformer transformer = new FileAsyncResponseTransformer(testPath); + + CompletableFuture prepareFuture = transformer.prepare(); + + transformer.onResponse(new Object()); + + transformer.onStream(new TestPublisher()); + futures.add(prepareFuture); + } + + CompletableFuture future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])); + future.get(10, TimeUnit.SECONDS); + assertThat(future.isCompletedExceptionally()).isFalse(); + } + + static class TestPublisher implements SdkPublisher { + private AtomicInteger requestNumber = new AtomicInteger(0); + private volatile boolean isDone = false; + + @Override + public void subscribe(Subscriber s) { + + s.onSubscribe(new Subscription() { + @Override + public void request(long l) { + + if (isDone) { + return; + } + requestNumber.incrementAndGet(); + + if (requestNumber.get() == 2) { + isDone = true; + s.onComplete(); + 
return; + } + + s.onNext(ByteBuffer.wrap(RandomStringUtils.randomAlphanumeric(30000).getBytes())); + } + + @Override + public void cancel() { + } + }); + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java index 3e78313a9920..e1e0e26eeea5 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeAsyncHttpRequestStageTest.java @@ -15,8 +15,11 @@ package software.amazon.awssdk.core.internal.http.pipeline.stages; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -34,17 +37,23 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.http.ExecutionContext; import software.amazon.awssdk.core.http.NoopTestRequest; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.timers.ClientExecutionAndRequestTimerTestUtils; import 
software.amazon.awssdk.core.internal.util.AsyncResponseHandlerTestUtils; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.metrics.MetricCollector; import utils.ValidSdkObjects; @RunWith(MockitoJUnitRunner.class) @@ -91,6 +100,49 @@ public void apiCallAttemptTimeoutNotEnabled_shouldNotInvokeExecutor() throws Exc verify(timeoutExecutor, never()).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class)); } + @Test + public void testExecute_contextContainsMetricCollector_addsChildToExecuteRequest() { + stage = new MakeAsyncHttpRequestStage<>( + combinedAsyncResponseHandler(AsyncResponseHandlerTestUtils.noOpResponseHandler(), + AsyncResponseHandlerTestUtils.noOpResponseHandler()), + clientDependencies(null)); + + SdkHttpFullRequest sdkHttpRequest = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .host("mybucket.s3.us-west-2.amazonaws.com") + .protocol("https") + .build(); + + MetricCollector mockCollector = mock(MetricCollector.class); + MetricCollector childCollector = mock(MetricCollector.class); + + when(mockCollector.createChild(any(String.class))).thenReturn(childCollector); + + ExecutionContext executionContext = ExecutionContext.builder() + .executionAttributes(new ExecutionAttributes()) + .build(); + + RequestExecutionContext context = RequestExecutionContext.builder() + .originalRequest(ValidSdkObjects.sdkRequest()) + .executionContext(executionContext) + .build(); + + context.attemptMetricCollector(mockCollector); + + try { + stage.execute(sdkHttpRequest, context); + } catch (Exception e) { + e.printStackTrace(); + // ignored, don't really care about successful execution of the stage in this case + } finally { + ArgumentCaptor httpRequestCaptor = ArgumentCaptor.forClass(AsyncExecuteRequest.class); + + 
verify(mockCollector).createChild(eq("HttpClient")); + verify(sdkAsyncHttpClient).execute(httpRequestCaptor.capture()); + assertThat(httpRequestCaptor.getValue().metricCollector()).contains(childCollector); + } + } + private HttpClientDependencies clientDependencies(Duration timeout) { SdkClientConfiguration configuration = SdkClientConfiguration.builder() .option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, Runnable::run) diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStageTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStageTest.java new file mode 100644 index 000000000000..5636852bf5a3 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/MakeHttpRequestStageTest.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.core.client.config.SdkClientOption.SYNC_HTTP_CLIENT; +import java.io.IOException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.http.ExecutionContext; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.timers.TimeoutTracker; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.metrics.MetricCollector; +import utils.ValidSdkObjects; + +@RunWith(MockitoJUnitRunner.class) +public class MakeHttpRequestStageTest { + + @Mock + private SdkHttpClient mockClient; + + private MakeHttpRequestStage stage; + + @Before + public void setup() throws IOException { + SdkClientConfiguration config = SdkClientConfiguration.builder().option(SYNC_HTTP_CLIENT, mockClient).build(); + stage = new MakeHttpRequestStage(HttpClientDependencies.builder().clientConfiguration(config).build()); + } + + @Test + public void testExecute_contextContainsMetricCollector_addsChildToExecuteRequest() { + SdkHttpFullRequest sdkRequest = SdkHttpFullRequest.builder() + 
.method(SdkHttpMethod.GET) + .host("mybucket.s3.us-west-2.amazonaws.com") + .protocol("https") + .build(); + + MetricCollector mockCollector = mock(MetricCollector.class); + MetricCollector childCollector = mock(MetricCollector.class); + + when(mockCollector.createChild(any(String.class))).thenReturn(childCollector); + + ExecutionContext executionContext = ExecutionContext.builder() + .executionAttributes(new ExecutionAttributes()) + .build(); + + RequestExecutionContext context = RequestExecutionContext.builder() + .originalRequest(ValidSdkObjects.sdkRequest()) + .executionContext(executionContext) + .build(); + + context.attemptMetricCollector(mockCollector); + context.apiCallAttemptTimeoutTracker(mock(TimeoutTracker.class)); + context.apiCallTimeoutTracker(mock(TimeoutTracker.class)); + + try { + stage.execute(sdkRequest, context); + } catch (Exception e) { + // ignored, don't really care about successful execution of the stage in this case + } finally { + ArgumentCaptor httpRequestCaptor = ArgumentCaptor.forClass(HttpExecuteRequest.class); + + verify(mockCollector).createChild(eq("HttpClient")); + verify(mockClient).prepareRequest(httpRequestCaptor.capture()); + assertThat(httpRequestCaptor.getValue().metricCollector()).contains(childCollector); + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java index 5528486f3ed7..50c47c953bee 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/AsyncHttpClientApiCallTimeoutTests.java @@ -53,6 +53,7 @@ import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; 
+import software.amazon.awssdk.metrics.MetricCollector; import utils.ValidSdkObjects; public class AsyncHttpClientApiCallTimeoutTests { @@ -107,6 +108,7 @@ public void errorResponse_SlowAfterErrorRequestHandler_ThrowsApiCallTimeoutExcep .interceptorChain(interceptors) .executionAttributes(new ExecutionAttributes()) .interceptorContext(incerceptorContext) + .metricCollector(MetricCollector.create("ApiCall")) .build(); CompletableFuture future = @@ -181,6 +183,7 @@ private ExecutionContext withInterceptors(ExecutionInterceptor... requestHandler .interceptorChain(interceptors) .executionAttributes(new ExecutionAttributes()) .interceptorContext(incerceptorContext) + .metricCollector(MetricCollector.create("ApiCall")) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/ClientExecutionAndRequestTimerTestUtils.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/ClientExecutionAndRequestTimerTestUtils.java index 5b439a711b03..b4923e2b363e 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/ClientExecutionAndRequestTimerTestUtils.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/ClientExecutionAndRequestTimerTestUtils.java @@ -34,6 +34,7 @@ import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.metrics.MetricCollector; /** * Useful asserts and utilities for verifying behavior or the client execution timeout and request @@ -116,6 +117,7 @@ public static ExecutionContext executionContext(SdkHttpFullRequest request) { .interceptorChain(new ExecutionInterceptorChain(Collections.emptyList())) .executionAttributes(new ExecutionAttributes()) .interceptorContext(incerceptorContext) + .metricCollector(MetricCollector.create("ApiCall")) .build(); } diff --git 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java index b1c6c36642b5..b0f0ee487e66 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallAttemptTimeoutTest.java @@ -48,6 +48,7 @@ import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; import utils.ValidSdkObjects; @@ -143,6 +144,7 @@ private ExecutionContext withInterceptors(ExecutionInterceptor... requestHandler .interceptorChain(interceptors) .executionAttributes(new ExecutionAttributes()) .interceptorContext(incerceptorContext) + .metricCollector(MetricCollector.create("ApiCall")) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java index 2b58e2a55706..937d3bf851cb 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/timers/HttpClientApiCallTimeoutTest.java @@ -48,6 +48,7 @@ import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; import utils.ValidSdkObjects; @@ -143,6 +144,7 @@ private ExecutionContext withInterceptors(ExecutionInterceptor... 
requestHandler .interceptorChain(interceptors) .executionAttributes(new ExecutionAttributes()) .interceptorContext(incerceptorContext) + .metricCollector(MetricCollector.create("ApiCall")) .build(); } } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MetricUtilsTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MetricUtilsTest.java new file mode 100644 index 000000000000..b32a543a7404 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MetricUtilsTest.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.util; + +import static org.assertj.core.api.Java6Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +import java.io.IOException; +import java.time.Duration; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Pair; + +public class MetricUtilsTest { + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testMeasureDuration_returnsAccurateDurationInformation() { + long testDurationNanos = Duration.ofMillis(1).toNanos(); + + Pair measuredExecute = MetricUtils.measureDuration(() -> { + long start = System.nanoTime(); + // spin thread instead of Thread.sleep() for a bit more accuracy... 
+ while (System.nanoTime() - start < testDurationNanos) { + } + return "foo"; + }); + + assertThat(measuredExecute.right()).isGreaterThanOrEqualTo(Duration.ofNanos(testDurationNanos)); + } + + @Test + public void testMeasureDuration_returnsCallableReturnValue() { + String result = "foo"; + + Pair measuredExecute = MetricUtils.measureDuration(() -> result); + + assertThat(measuredExecute.left()).isEqualTo(result); + } + + @Test + public void testMeasureDurationUnsafe_doesNotWrapException() throws Exception { + IOException ioe = new IOException("boom"); + + thrown.expect(IOException.class); + try { + MetricUtils.measureDurationUnsafe(() -> { + throw ioe; + }); + } catch (IOException caught) { + assertThat(caught).isSameAs(ioe); + throw caught; + } + } + + @Test + public void testMeasureDuration_doesNotWrapException() { + RuntimeException e = new RuntimeException("boom"); + + thrown.expect(RuntimeException.class); + + try { + MetricUtils.measureDuration(() -> { + throw e; + }); + } catch (RuntimeException caught) { + assertThat(caught).isSameAs(e); + throw caught; + } + } + + @Test + public void testCollectHttpMetrics_collectsAllExpectedMetrics() { + MetricCollector mockCollector = mock(MetricCollector.class); + + int statusCode = 200; + String requestId = "request-id"; + String amznRequestId = "amzn-request-id"; + String requestId2 = "request-id-2"; + + SdkHttpFullResponse response = SdkHttpFullResponse.builder() + .statusCode(statusCode) + .putHeader("x-amz-request-id", requestId) + .putHeader(HttpResponseHandler.X_AMZN_REQUEST_ID_HEADER, amznRequestId) + .putHeader(HttpResponseHandler.X_AMZ_ID_2_HEADER, requestId2) + .build(); + + MetricUtils.collectHttpMetrics(mockCollector, response); + + verify(mockCollector).reportMetric(HttpMetric.HTTP_STATUS_CODE, statusCode); + verify(mockCollector).reportMetric(CoreMetric.AWS_REQUEST_ID, requestId); + verify(mockCollector).reportMetric(CoreMetric.AWS_REQUEST_ID, amznRequestId); + 
verify(mockCollector).reportMetric(CoreMetric.AWS_EXTENDED_REQUEST_ID, requestId2); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MimetypeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MimetypeTest.java index 9f94d72e0032..a4dbd7df4611 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MimetypeTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/util/MimetypeTest.java @@ -16,7 +16,10 @@ package software.amazon.awssdk.core.internal.util; import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import java.nio.file.Path; import org.junit.BeforeClass; import org.junit.Test; @@ -48,4 +51,11 @@ public void unknownExtensions_defaulttoBeStream() throws Exception { public void noExtensions_defaulttoBeStream() throws Exception { assertThat(mimetype.getMimetype("test")).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); } + + @Test + public void pathWithoutFileName_defaulttoBeStream() throws Exception { + Path mockPath = mock(Path.class); + when(mockPath.getFileName()).thenReturn(null); + assertThat(mimetype.getMimetype(mockPath)).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + } } diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md index db417706179d..2e308be40edd 100644 --- a/docs/GettingStarted.md +++ b/docs/GettingStarted.md @@ -8,17 +8,17 @@ ### Development Environment Setup Tips If you use IntelliJ IDEA, we include some helpful config files that will make your development experience smoother: -- [intellij-codestyle.xml](https://github.com/aws/aws-sdk-java-v2/blob/master/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml) +- [intellij-codestyle.xml](https://raw.githubusercontent.com/aws/aws-sdk-java-v2/master/build-tools/src/main/resources/software/amazon/awssdk/intellij-codestyle.xml) This will help ensure your code 
follows our code style guidelines. -- [intellij-copyright-profile.xml](https://github.com/aws/aws-sdk-java-v2/blob/master/build-tools/src/main/resources/software/amazon/awssdk/intellij-copyright-profile.xml) +- [intellij-copyright-profile.xml](https://raw.githubusercontent.com/aws/aws-sdk-java-v2/master/build-tools/src/main/resources/software/amazon/awssdk/intellij-copyright-profile.xml) This automatically inserts the license header to the top of source files that you create. If you have Checkstyle integrated with your IDE, we also recommend configuring it with our -[Checkstyle config](https://github.com/aws/aws-sdk-java-v2/blob/master/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml) +[Checkstyle config](https://raw.githubusercontent.com/aws/aws-sdk-java-v2/master/build-tools/src/main/resources/software/amazon/awssdk/checkstyle.xml) so you can see any violations in line with the code. ### Building diff --git a/docs/design/core/metrics/DecisionLog.md b/docs/design/core/metrics/DecisionLog.md new file mode 100644 index 000000000000..0f2601b859f7 --- /dev/null +++ b/docs/design/core/metrics/DecisionLog.md @@ -0,0 +1,41 @@ +# Decision Log for SDK V2 Metrics + +Note: The decision log process was implemented late in this project, so decisions earlier than 7/30/20 are not included +below. + +## Log Entry Template + +**Source:** (Meeting/aside/pair programming discussion/daily standup) to (discuss/implement) X + +**Attendees:** Anna-Karin, Ben, Dongie, Irene, Matt, Nico, Vinod, Zoe + +**Closed Decisions:** + +1. Question? Decision. Justification. + +**Open Decisions:** + +1. (Old/Reopened/New) Question? + +## 6/30/20 + +**Source:** Meeting to discuss https://github.com/aws/aws-sdk-java-v2/pull/1926 (metrics configuration design) + +**Attendees:** Anna-Karin, Ben, Dongie, Matt, Nico, Zoe + +**Closed Decisions:** + +1. Should Option 1 override Option 2 or append? Override. 
That is what intuitively makes sense to us, and is aligned with +what most other similar settings do. +2. Should we return a builder from the SPI? No. We can't come up with a concrete case for it, and can add another method to +the SPI in the future that takes in a "configuration" object if we find a need. +3. Should we support upper/lowercase boolean environment variables? Be consistent with what we do elsewhere. Consistency +is a core tenet. +4. Should we support a profile-file setting? No. We don't want to be like the CLI and come up with arbitrary SDK-specific +properties, but we can explore that option in the future if customers ask for it. +5. Should we use the service loader? Yes. It's consistent with the way HTTP clients work, and it keeps the module off of +the classpath. + +**Open Decisions:** + +None \ No newline at end of file diff --git a/docs/design/core/metrics/Design.md b/docs/design/core/metrics/Design.md index 6220f121d8c6..c1f757623221 100644 --- a/docs/design/core/metrics/Design.md +++ b/docs/design/core/metrics/Design.md @@ -1,280 +1,259 @@ +# SDK Metrics System ## Concepts ### Metric -* A representation of data collected -* Metric can be one of the following types: Counter, Gauge, Timer -* Metric can be associated to a category. Some of the metric categories are Default, HttpClient, Streaming etc +* A measure of some aspect of the SDK. Examples include request latency, number + of pooled connections and retries executed. -### MetricRegistry +* A metric is associated to a category. Some of the metric categories are + `Default`, `HttpClient` and `Streaming`. This enables customers to enable + metrics only for categories they are interested in. -* A MetricRegistry represent an interface to store the collected metric data. It can hold different types of Metrics - described above -* MetricRegistry is generic and not tied to specific category (ApiCall, HttpClient etc) of metrics. -* Each API call has it own instance of a MetricRegistry. 
All metrics collected in the ApiCall lifecycle are stored in - that instance. -* A MetricRegistry can store other instances of same type. This can be used to store metrics for each Attempt in an Api - Call. -* [Interface prototype](prototype/MetricRegistry.java) +Refer to the [Metrics List](./MetricsList.md) document for a complete list of +standard metrics collected by the SDK. + +### Metric Collector + +* `MetricCollector` is a typesafe aggregator of metrics. This is the primary + interface through which other SDK components report metrics they emit, using + the `reportMetric(SdkMetric,Object)` method. + +* `MetricCollector` objects allow for nesting. This enables metrics to be + collected in the context of other metric events. For example, for a single + API call, there may be multiple request attempts if there are retries. Each + attempt's associated metric events can be stored in their own + `MetricCollector`, all of which are children of another collector that + represents metrics for the entire API call. + + A child of a collector is created by calling its `childCollector(String)` + method. + +* The `collect()` method returns a `MetricCollection`. This class essentially + returns an immutable version of the tree formed by the collector and its + children, which are also represented by `MetricCollection` objects. + + Note that calling `collect()` implies that child collectors are also + collected. + +* Each collector has a name. Often this will be used to describe the class of + metrics that it collects; e.g. `"ApiCall"` and `"ApiCallAttempt"`. + +* [Interface prototype](prototype/MetricCollector.java) ### MetricPublisher -* A MetricPublisher represent an interface to publish the collected metrics to a external source. 
-* SDK provides implementations to publish metrics to services like [Amazon - CloudWatch](https://aws.amazon.com/cloudwatch/), [Client Side - Monitoring](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/sdk-metrics.html) (also known as AWS SDK - Metrics for Enterprise Support) -* Customers can implement the interface and register the custom implementation to publish metrics to a platform not - supported in the SDK. -* MetricPublishers can have different behaviors in terms of list of metrics to publish, publishing frequency, - configuration needed to publish etc. -* Metrics can be explicitly published to the platform by calling publish() method. This can be useful in scenarios when - the application fails and customer wants to flush metrics before exiting the application. -* [Interface prototype](prototype/MetricPublisher.java) +* A `MetricPublisher` publishes collected metrics to a system(s) outside of the + SDK. It takes a `MetricCollection` object, potentially transforms the data + into richer metrics, and also into a format the receiver expects. -### Reporting +* By default, the SDK will provide implementations to publish metrics to [Amazon + CloudWatch](https://aws.amazon.com/cloudwatch/) and [Client Side + Monitoring](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/sdk-metrics.html) + (also known as AWS SDK Metrics for Enterprise Support). -* Reporting is transferring the collected metrics to Publishers. -* To report metrics to a publisher, call the registerMetrics(MetricRegistry) method on the MetricPublisher. -* There is no requirement for Publisher to publish the reported metrics immediately after calling this method. +* Metrics publishers are pluggable within the SDK, allowing customers to + provide their own custom implementations. +* Metric publishers can have different behaviors in terms of list of metrics to + publish, publishing frequency, configuration needed to publish etc. 
+ +* [Interface prototype](prototype/MetricPublisher.java) ## Enabling Metrics -Metrics feature is disabled by default. Metrics can be enabled at client level in the following ways. +The metrics feature is disabled by default. Metrics can be enabled and configured in the following ways: -### Feature Flags (Metrics Provider) +### Option 1: Configuring MetricPublishers on a request -* SDK exposes an [interface](prototype/MetricConfigurationProvider.java) to enable the metrics feature and specify - options to configure the metrics behavior. -* SDK provides an implementation of this interface based on system properties. -* Here are the system properties SDK supports: - - **aws.javasdk2x.metrics.enabled** - Metrics feature is enabled if this system property is set - - **aws.javasdk2x.metrics.category** - Comma separated set of MetricCategory that are enabled for collection -* SDK calls the methods in this interface for each request ie, enabled() method is called for every request to determine - if the metrics feature is enabled or not (similarly for other configuration options). - - This allows customers to control metrics behavior in a more flexible manner; for example using an external database - like DynamoDB to dynamically control metrics collection. This is useful to enable/disable metrics feature and - control metrics options at runtime without the need to make code changes or re-deploy the application. -* As the interface methods are called for each request, it is recommended for the implementations to run expensive tasks - asynchronously in the background, cache the results and periodically refresh the results. 
+A publisher can be configured directly on the `RequestOverrideConfiguration`: ```java -ClientOverrideConfiguration config = ClientOverrideConfiguration - .builder() - // If this is not set, SDK uses the default chain with system property - .metricConfigurationProvider(new SystemSettingsMetricConfigurationProvider()) - .build(); - -// Set the ClientOverrideConfiguration instance on the client builder -CodePipelineAsyncClient asyncClient = - CodePipelineAsyncClient - .builder() - .overrideConfiguration(config) - .build(); +MetricPublisher metricPublisher = CloudWatchMetricPublisher.create(); +DynamoDbClient dynamoDb = DynamoDbClient.create(); +dynamoDb.listTables(ListTablesRequest.builder() + .overrideConfiguration(c -> c.addMetricPublisher(metricPublisher)) + .build()); ``` -### Metrics Provider Chain - -* Customers might want to have different ways of enabling the metrics feature. For example: use SystemProperties by - default. If not use implementation based on Amazon DynamoDB. -* To support multiple providers, SDK allows setting chain of providers (similar to the CredentialsProviderChain to - resolve credentials). As provider has multiple configuration options, a single provider is resolved at chain - construction time and it is used throughout the lifecycle of the application to keep the behavior intuitive. -* If no custom chain is provided, SDK will use a default chain while looks for the System properties defined in above - section. SDK can add more providers in the default chain in the future without breaking customers. +The methods exposed for setting metric publishers follow the pattern established by `ExecutionInterceptor`s: ```java -MetricConfigurationProvider chain = new MetricConfigurationProviderChain( - new SystemSettingsMetricConfigurationProvider(), - // example custom implementation (not provided by the SDK) - DynamoDBMetricConfigurationProvider.builder() - .tableName(TABLE_NAME) - .enabledKey(ENABLE_KEY_NAME) - ... 
- .build(), - ); - -ClientOverrideConfiguration config = ClientOverrideConfiguration - .builder() - // If this is not set, SDK uses the default chain with system property - .metricConfigurationProvider(chain) - .build(); - -// Set the ClientOverrideConfiguration instance on the client builder -CodePipelineAsyncClient asyncClient = - CodePipelineAsyncClient - .builder() - .overrideConfiguration(config) - .build(); +class RequestOverrideConfiguration { + // ... + class Builder { + // ... + Builder metricPublishers(List metricsPublishers); + Builder addMetricPublisher(MetricPublisher metricsPublisher); + } +} ``` -### Metric Publishers Configuration +### Option 2: Configuring MetricPublishers on a client -* If metrics are enabled, SDK by default uses a single publisher that uploads metrics to CloudWatch using default - credentials and region. -* Customers might want to use different configuration for the CloudWatch publisher or even use a different publisher to - publish to a different source. To provide this flexibility, SDK exposes an option to set - [MetricPublisherConfiguration](prototype/MetricPublisherConfiguration.java) which can be used to configure custom - publishers. -* SDK publishes the collected metrics to each of the configured publishers in the MetricPublisherConfiguration. +A publisher can be configured directly on the `ClientOverrideConfiguration`. A publisher specified in this way is used +with lower priority than **Option 1** above. ```java -ClientOverrideConfiguration config = ClientOverrideConfiguration - .builder() - .metricPublisherConfiguration(MetricPublisherConfiguration - .builder() - .addPublisher( - CloudWatchPublisher.builder() - .credentialsProvider(...) 
- .region(Region.AP_SOUTH_1) - .publishFrequency(5, TimeUnit.MINUTES) - .build(), - CsmPublisher.create()).bu - .build()) - .build(); - -// Set the ClientOverrideConfiguration instance on the client builder -CodePipelineAsyncClient asyncClient = - CodePipelineAsyncClient - .builder() - .overrideConfiguration(config) - .build(); +MetricPublisher metricPublisher = CloudWatchMetricPublisher.create(); +DynamoDbClient dynamoDb = DynamoDbClient.builder() + .overrideConfiguration(c -> c.addMetricPublisher(metricPublisher)) + .build(); ``` +The methods exposed for setting metric publishers follow the pattern established by `ExecutionInterceptor`s: -## Modules -New modules are created to support metrics feature. +```java +class ClientOverrideConfiguration { + // ... + class Builder { + // ... + Builder metricPublishers(List metricsPublishers); + Builder addMetricPublisher(MetricPublisher metricsPublisher); + } +} +``` -### metrics-spi -* Contains the metrics interfaces and default implementations that don't require other dependencies -* This is a sub module under `core` -* `sdk-core` has a dependency on `metrics-spi`, so customers will automatically get a dependency on this module. - -### metrics-publishers -* This is a new module that contains implementations of all SDK supported publishers -* Under this module, a new sub-module is created for each publisher (`cloudwatch-publisher`, `csm-publisher`) -* Customers has to **explicitly add dependency** on these modules to use the sdk provided publishers +**Note:** As with the `httpClient` setting, calling `close()` on the `DynamoDbClient` *will not* close the configured +`metricPublishers`. You must close the `metricPublishers` yourself when you're done using them. +### Option 3: Configuring MetricPublishers using System Properties or Environment Variables -## Sequence Diagram +This option allows the customer to enable metric publishing by default, without needing to enable it via **Option 1** +or **Option 2** above. 
This means that a customer can enable metrics without needing to make a change to their runtime +code. -Metrics Collection +This option is enabled using an environment variable or system property. If both are specified, the system property +will be used. If metrics are enabled at the client level using **Option 2** above, this option is ignored. Overriding +the metric publisher at request time using **Option 1** overrides any publishers that have been enabled globally. -

+**System Property:** `aws.metricPublishingEnabled=true` -![Metrics Collection](images/MetricCollection.jpg) +**Environment Variable:** `AWS_METRIC_PUBLISHING_ENABLED=true` -
+The value specified must be one of `"true"` or `"false"`. Specifying any other string value will result in +a value of `"false"` being used, and a warning being logged each time an SDK client is created. -MetricPublisher +When the value is `"false"`, no metrics will be published by a client. -
+When the value is `"true"`, metrics will be published by every client to a set of "global metric publishers". The set +of global metric publishers is loaded automatically using the same mechanism currently used to discover HTTP +clients. This means that including the `cloudwatch-metric-publisher` module and enabling the system property or +environment variable above is sufficient to enable metric publishing to CloudWatch on all AWS clients. -![MetricPublisher fig.align="left"](images/MetricPublisher.jpg) +The set of "Global Metric Publishers" is static and is used for *all* AWS SDK clients instantiated by the application +(while **Option 3** remains enabled). A JVM shutdown hook will be registered to invoke `MetricPublisher.close()` on +every publisher (in case the publishers use non-daemon threads that would otherwise block JVM shutdown). -
+#### Updating a MetricPublisher to work as a global metric publisher -1. Client enables metrics feature through MetricConfigurationProvider and configure publishers through - MetricPublisherConfiguration. -2. For each API call, a new MetricRegistry object is created and stored in the ExecutionAttributes. If metrics are not - enabled, a NoOpMetricRegistry is used. -3. At each metric collection point, the metric is registered in the MetricRegistry object if its category is enabled in - MetricConfigurationProvider. -4. The metrics that are collected once for a Api Call execution are stored in the METRIC_REGISTRY ExecutionAttribute. -5. The metrics that are collected per Api Call attempt are stored in new MetricRegistry instances which are part of the - ApiCall MetricRegistry. These MetricRegistry instance for the current attempt is also accessed through - ATTEMPT_METRIC_REGISTRY ExecutionAttribute. -6. At end of API call, report the MetricRegistry object to MetricPublishers by calling registerMetrics(MetricRegistry) - method. This is done in an ExecutionInterceptor. -7. Steps 2 to 6 are repeated for each API call -8. MetricPublisher calls publish() method to report metrics to external sources. The frequency of publish() method call - is unique to Publisher implementation. -9. Client has access to all registered publishers and it can call publish() method explicitly if desired. +**Option 3** above references the concept of "Global Metric Publishers", which are a set of publishers that are +discovered automatically by the SDK. This section outlines how global metric publishers are discovered and created. +Each `MetricPublisher` that supports loading when **Option 3** is enabled must: +1. Provide an `SdkMetricPublisherService` implementation. An `SdkMetricPublisherService` implementation is a class with +a zero-arg constructor, used to instantiate a specific type of `MetricPublisher` (e.g. 
a +`CloudWatchMetricPublisherService` that is a factory for `CloudWatchMetricPublisher`s). +2. Provide a resource file: `META-INF/services/software.amazon.awssdk.metrics.SdkMetricPublisherService`. This file +contains the list of fully-qualified `SdkMetricPublisherService` implementation class names. -CloudWatch MetricPublisher +The `software.amazon.awssdk.metrics.SdkMetricPublisherService` interface that must be implemented by all global metric +publisher candidates is defined as: -
+```java +public interface SdkMetricPublisherService { + MetricPublisher createMetricPublisher(); +} +``` -![CloudWatch MetricPublisher](images/CWMetricPublisher.jpg) +**`SdkMetricPublisherService` Example** -
+Enabling the `CloudWatchMetricPublisher` as a global metric publisher can be done by implementing the +`SdkMetricPublisherService` interface: -## Implementation Details -Few important implementation details are discussed in this section. +```java +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +public final class CloudWatchSdkMetricPublisherService implements SdkMetricPublisherService { + @Override + public MetricPublisher createMetricPublisher() { + return CloudWatchMetricPublisher.create(); + } +} +``` -SDK modules can be organized as shown in this image. +And creating a `META-INF/services/software.amazon.awssdk.metrics.SdkMetricPublisherService` resource file in the +`cloudwatch-metric-publisher` module with the following contents: -
+``` +software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchSdkMetricPublisherService +``` -![Module Hierarchy](images/MetricsModulesHierarchy.png) +#### Option 3 Implementation Details and Edge Cases -
+**How the SDK loads `MetricPublisher`s when Option 3 is enabled** -* Core modules - Modules in the core directory while have access to ExecutionContext and ExecutionAttributes -* Downstream modules - Modules where execution occurs after core modules. For example, http-clients is downstream module - as the request is transferred from core to http client for further execution. -* Upstream modules - Modules that live in layers above core. Examples are High Level libraries (HLL) or Applications - that use SDK. Execution goes from Upstream modules to core modules. +When a client is created with **Option 3** enabled (and **Option 2** "not specified"), the client retrieves the list of +global metric publishers to use via a static "global metric publisher list" singleton. This singleton is initialized +exactly once using the following process: +1. The singleton uses `java.util.ServiceLoader` to locate all `SdkMetricPublisherService` implementations configured +as described above. The classloader used with the service loader is chosen in the same manner as the one chosen for the +HTTP client service loader (`software.amazon.awssdk.core.internal.http.loader.SdkServiceLoader`). That is, the first +classloader present in the following list: (1) the classloader that loaded the SDK, (2) the current thread's classloader, +then (3) the system classloader. +2. The singleton creates an instance of every `SdkMetricPublisherService` located in this manner. +3. The singleton creates an instance of each `MetricPublisher` using the metric publisher services. -### Core Modules -* SDK will use ExecutionAttributes to pass the MetricConfigurationProvider information through out the core module where - core request-response metrics are collected. -* Instead of checking whether metrics is enabled at each metric collection point, SDK will use the instance of - NoOpMetricRegistry (if metrics are disabled) and DefaultMetricRegistry (if metrics are enabled). 
-* The NoOpMetricRegistry class does not collect or store any metric data. Instead of creating a new NoOpMetricRegistry - instance for each request, use the same instance for every request to avoid additional object creation. -* The DefaultMetricRegistry class will only collect metrics if they belong to the MetricCategory list provided in the - MetricConfigurationProvider. To support this, DefaultMetricRegistry is decorated by another class to filter metric - categories that are not set in MetricConfigurationProvider. +**How Option 3 and Option 1 behave when Option 2 is "not specified"** -### Downstream Modules -* The MetricRegistry object and other required metric configuration details will be passed to the classes in downstream - modules. -* For example, HttpExecuteRequest for sync http client, AsyncExecuteRequest for async http client. -* Downstream modules record the metric data directly into the given MetricRegistry object. -* As we use same MetricRegistry object for core and downstream modules, both metrics will be reported to the Publisher - together. +The SDK treats **Option 3** as the default set of client-level metric publishers to be +used when **Option 2** is "not specified". This means that if a customer: (1) enables global metric publishing using +**Option 3**, (2) does not specify client-level publishers using **Option 2**, and (3) specifies metric publishers at +the request level with **Option 1**, then the global metric publishers are still *instantiated* but will not be used. +This nuance prevents the SDK from needing to consult the global metric configuration with every request. -### Upstream Modules -* As MetricRegistry object is created after the execution is passed from Upstream modules, these modules won't be able - to modify/add to the core metrics. -* If upstream modules want to report additional metrics using the registered publishers, they would need to create - MetricRegistry instances and explicitly call the methods on the Publishers. 
-* It would be useful to get the low-level API metrics in these modules, so SDK will expose APIs to get an immutable - version of the MetricRegistry object so that upstream classes can use that information in their metric calculation. +**How Option 2 is considered "not specified" for the purposes of considering Option 3** -### Reporting -* Collected metrics are reported to the configured publishers at the end of each Api Call by calling - `registerMetrics(MetricRegistry)` method on MetricPublisher. -* The MetricRegistry argument in the registerMetrics method will have data on the entire Api Call including retries. -* This reporting is done in `MetricsExecutionInterceptor` via `afterExecution()` and `onExecutionFailure()` methods. -* `MetricsExecutionInterceptor` will always be the last configured ExecutionInterceptor in the interceptor chain +Global metric publishers (**Option 3**) are only considered for use when **Option 2** is "not specified". +"Not specified" is defined to be when the customer either: (1) does not invoke +`ClientOverrideConfiguration.Builder.addMetricPublisher()` / `ClientOverrideConfiguration.Builder.metricPublishers()`, +or (2) invokes `ClientOverrideConfiguration.Builder.metricPublishers(null)` as the last `metricPublisher`-mutating +action on the client override configuration builder. -## Performance -One of the main tenet for metrics is “Enabling default metrics should have minimal impact on the application -performance". The following design choices are made to ensure enabling metrics does not effect performance -significantly. -* When collecting metrics, a NoOpRegistry is used if metrics are disabled. All methods in this registry are no-op and - return immediately. This also has the additional benefit of avoid metricsEnabled check at each metric collection - point. -* Metric publisher implementations can involve network calls and impact latency if done in blocking way. 
So all SDK - publisher implementation will process the metrics asynchronously and does not block the actual request. +This definition purposefully excludes `ClientOverrideConfiguration.Builder.metricPublishers(emptyList())`. Setting +the `metricPublishers` to an empty list is equivalent to setting the `metricPublishers` to the `NoOpMetricPublisher`. +**Implementing an SdkMetricPublisherService that depends on AWS clients** -## Testing +Any `MetricPublisher` that supports creation via a `SdkMetricPublisherService` and depends on an AWS service client +**must** disable metric publishing on those AWS service clients using **Option 2** when they are created via the +`SdkMetricPublisherService`. This is to prevent a scenario where the global metric publisher singleton's initialization +process depends on the global metric publishers singleton already being initialized. -To ensure performance is not impacted due to metrics, tests should be written with various scenarios and a baseline for -overhead should be created. These tests should be run regularly to catch regressions. +## Modules +New modules are created to support the metrics feature. -### Test Cases +### metrics-spi +* Contains the metrics interfaces and default implementations that don't require other dependencies +* This is a sub module under `core` +* `sdk-core` has a dependency on `metrics-spi`, so customers will automatically get a dependency on this module. -SDK will be tested under load for each of these test cases using the load testing framework we already have. Each of -these test case results should be compared with metrics feature disabled & enabled, and then comparing the results. 
+### metrics-publishers +* This is a new module that contains implementations of all SDK supported publishers +* Under this module, a new sub-module is created for each publisher (`cloudwatch-publisher`, `csm-publisher`) +* Customers have to **explicitly add a dependency** on these modules to use the SDK-provided publishers -1. Enable each metrics publisher (CloudWatch, CSM) individually. -2. Enable all metrics publishers. -3. Individually enable each metric category to find overhead for each MetricCategory. +## Performance +One of the main tenets for metrics is “Enabling default metrics should have +minimal impact on the application performance". The following design choices are +made to ensure enabling metrics does not affect performance significantly. +* When collecting metrics, a No-op metric collector is used if metrics are + disabled. All methods in this collector are no-op and return immediately. +* Metric publisher implementations can involve network calls and impact latency + if done in a blocking way. Therefore, all SDK publisher implementations will + process the metrics asynchronously to not block the request thread. +* Performance tests will be written and run with each release to ensure that the + SDK performs well even when metrics are enabled and being collected and + published. diff --git a/docs/design/core/metrics/MetricsList.md b/docs/design/core/metrics/MetricsList.md index f93c912be42a..1ad8145262cd 100644 --- a/docs/design/core/metrics/MetricsList.md +++ b/docs/design/core/metrics/MetricsList.md @@ -1,130 +1,61 @@ -Here is the detailed list of metrics that SDK can collect. Each metric belongs to a category. If a category is enabled, -then all metrics belonging to that category will be collected by the SDK. - -## Category - -1) Default - All metrics under this category will be collected when the metrics are enabled -2) HttpClient - Additional information collected for http client. 
The metrics collected for each http client can vary -3) All - All metrics collected by the SDK comes under this category. This can be useful for debugging purposes. - -Note: When metrics feature is enabled, only the `Default` category metrics are collected. Other categories should be -explicitly enabled. - -## Information collected at application level (Category: Default) - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| RequestCount | Counter | Total number of requests (successful and failed) made from your code to AWS services -| SuccessRequestCount | Counter | Total number of requests from your code to AWS services that resulted in a successful response -| FailedRequestCount | Counter | Total number of requests from your code to AWS services that resulted in a failure. This can be expanded later to categorize the failures into buckets (like ClientErrorCount, ServiceErrorCount, ConnectionErrorCount etc) - -## Information collected for each request (ApiCall) (Category: Default) - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| Service | ConstantGauge | Service ID of the AWS service that the API request is made against -| Api | ConstantGauge | The name of the AWS API the request is made to -| StreamingRequest | ConstantGauge | True if the request has streaming payload -| StreamingResponse | ConstantGauge | True if the response has streaming payload -| ApiCallStartTime | Timer | The start time of the request -| ApiCallEndTime | Timer | The end time of the request -| ApiCallLatency | Timer | The total time taken to finish a request (inclusive of all retries), ApiCallEndTime - ApiCallStartTime -| MarshallingLatency | Timer | The time taken to marshall the request -| ApiCallAttemptCount | Counter | Total number of attempts that were made by the service client to fulfill this request before succeeding or failing. 
(Value is 1 if there are no retries) - -Each ApiCall can have multiple attempts before the call succeed or fail. The following metrics are collected for each ApiCall Attempt. - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| ApiCallAttemptStartTime | Timer | The start time of each Api call attempt -| SigningLatency | Timer | The time taken to sign the request in an Api Call Attempt -| HttpRequestRoundTripLatency | Timer | The time taken by the underlying http client to start the Api call attempt and return the response -| UnmarshallingLatency | Timer | The time taken to unmarshall the response (same metric for both successful and failed requests) -| ApiCallAttemptEndTime | Timer | The end time of a Api call attempt -| ApiCallAttemptLatency | Timer | The total time taken for an Api call attempt (exclusive of retries), ApiCallAttemptEndTime - ApiCallAttemptStartTime -| AwsRequestId | ConstantGauge | The request Id for the request. Represented by `x-amz-request-id` header in response -| ExtendedRequestId | ConstantGauge | The extended request Id for the request. Represented by `x-amz-id-2` header in response -| HttpStatusCode | ConstantGauge | The http status code returned in the response. Null if there is no response -| AwsException | ConstantGauge | The Aws exception code returned by the service. This is included for each Api call attempt if the call results in a failure and caused by service -| SdkException | ConstantGauge | The error name for any failure that is due to something other than an Aws exception. 
This is included for each API call attempt if the call results in a failure and is caused by something other than service - -For each attempt, the following http client metrics are collected: - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| HttpClientName | ConstantGauge | Name of the underlying http client (Apache, Netty, UrlConnection) -| MaxConnections | Gauge | Maximum number of connections allowed in the connection pool -| AvailableConnections | Gauge | The number of idle connections in the connection pool that are ready to serve a request -| LeasedConnections | Gauge | The number of connections in the connection pool that are busy serving requests -| PendingRequests | Gauge | The number of requests awaiting a free connection from the pool - -## Additional Information collected for each http client (Category: HttpClient) - -### ApacheHttpClient -HttpClientName - Apache - -No additional metrics available for apache client currently - -### UrlConnectionHttpClient -HttpClientName - UrlConnection - -No additional metrics available for url connection client currently - -### NettyNioAsyncHttpClient -HttpClientName - Netty - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| FailedConnectionClose | Counter | Number of times a connection close has failed -| FailedPoolAcquire | Counter | Number of times a request failed to acquire a connection - -For Http2 requests, - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| ConnectionId | ConstantGauge | The identifier for a connection -| MaxStreamCount | Gauge | Maximum number of streams allowed on the connection -| CurrentStreamCount | Gauge | Number of active streams on the connection - - -## Information collected for event stream requests (Category: Default) - -| Metric Name | Meter | Description | -| ------------------ | ----------- | ---------------- | -| 
RequestEventsReceivedCount | Counter | Number of events received from the client -| RequestEventsSentCount | Counter | Number of events sent to the service -| ResponseEventsReceivedCount | Counter | Number of events received from the service -| ResponseEventsDeliveredCount | Counter | Number of events delivered to the client -| RequestSubscriptionCreated | Counter | Number of request subscriptions created to deliver events from client to service (For event stream requests like startStreamTranscription API in Transcribe Streaming service) -| RequestSubscriptionCompleted | Counter | Number of request subscriptions completed -| RequestSubscriptionCanceled | Counter | Number of request subscriptions canceled -| ResponseSubscriptionCreated | Counter | Number of response subscriptions created to deliver events from service to client -| ResponseSubscriptionCompleted | Counter | Number of response subscriptions completed -| ResponseSubscriptionCanceled | Counter | Number of response subscriptions canceled - - -## FAQ -1) When is the end time calculated for async requests? - The end time is calculated when the future is completed (either successfully or exceptionally) as opposed to the time when future is returned from API - -2) What errors are considered as throttling errors? 
- The request was considered as throttled if one of the following conditions are met: - 1) The http status code is equal to: `429` or `503` - 2) The error code is equal to one of the following values: - SlowDown - SlowDownException - Throttling - ThrottlingException - Throttled - ThrottledException - ServiceUnavailable - ServiceUnavailableException - ServiceUnavailableError - ProvisionedThroughputExceededException - TooManyRequests - TooManyRequestsException - DescribeAttachmentLimitExceeded - - -## References -1) [V1 Metrics Description](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/metrics/package-summary.html) +# Collected Metrics + + This document lists the metrics collected by various components of the SDK, + namely the core libraries and HTTP clients. + + A note on collector path: The path is assuming a `MetricCollection` tree rooted at an API call. + +## Core Metrics + +The set of core metrics includes all metrics collected by the core components +of the SDK. This includes components like SDK service clients, +request/response marshallers and unmarshallers, and signers. + +All the in-code constants associated with the metrics below can be found in the +[`software.amazon.awssdk.core.metrics.CoreMetric`](https://github.com/aws/aws-sdk-java-v2/blob/8c192e3b04892987bf0872f76ba4f65167f3a872/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java#L24) +class within `sdk-core`. + +| Name | Type | Description | +|-------------------------------|---------------|-------------| +| ServiceId | `String` | The unique ID for the service. This is present for all API call metrics.| +| OperationName | `String` | The name of the service operation being invoked. This is present for all API call metrics.| +| ApiCallDuration | `Duration` | The duration of the API call. This includes all call attempts made.| +| ApiCallSuccessful | `Boolean` | True if the API call succeeded, false otherwise. 
| +| BackoffDelayDuration | `Duration` | The duration of time that the SDK has waited before this API call attempt, based on the retry policy. | +| MarshallingDuration | `Duration` | The duration of time taken to marshall the SDK request to an HTTP request.| +| CredentialsFetchDuration | `Duration` | The duration of time taken to fetch signing credentials for the request.| +| SigningDuration | `Duration` | The duration of time taken to sign the HTTP request.| +| AwsRequestId | `String` | The request ID of the service request.| +| AwsExtendedRequestId | `String` | The extended request ID of the service request.| +| UnmarshallingDuration | `Duration` | The duration of time taken to unmarshall the HTTP response to an SDK response. | +| ServiceCallDuration | `Duration` | The duration of time taken to connect to the service (or acquire a connection from the connection pool), send the serialized request and receive the initial response (e.g. HTTP status code and headers). This DOES NOT include the time taken to read the entire response from the service. | +| RetryCount | `Integer` | The number of retries that the SDK performed in the execution of the request. 0 implies that the request worked the first time, and no retries were attempted. | + +## HTTP Metrics + +The set of HTTP metrics below are collected by components that implement the [HTTP SPI](https://github.com/aws/aws-sdk-java-v2/tree/sdk-metrics-development-2/http-client-spi). Which metrics are collected depends on the specific HTTP library used to implement the SPI; not all libraries will allow the collection of every metric below. + +Note that in the context of an SDK client API call, all `HttpClient` collectors are children of `ApiCallAttempt`; i.e. the full path to HTTP client metrics for an individual API call attempt is `ApiCall` > `ApiCallAttempt` > `HttpClient`. + +### Common HTTP Metrics + +Below are the metrics common to both HTTP/1.1 and HTTP/2 operations. 
+ +The constants are located in the `software.amazon.awssdk.http.HttpMetric` class in the `http-spi` module. + +| Name | Type | Description | +|-------------------------------|-----------|-------------| +| HttpClientName | `String` | The name of the HTTP client. | +| MaxConcurrency | `Integer` | For HTTP/1.1 operations, this is equal to the maximum number of TCP connections that can be pooled by the HTTP client. For HTTP/2 operations, this is equal to the maximum number of streams that can be pooled by the HTTP client. +| LeasedConcurrency | `Integer` | The number of requests that are currently being executed by the HTTP client. | +| PendingConcurrencyAcquires | `Integer` | The number of requests that are awaiting concurrency to be made available from the HTTP client. | +| HttpStatusCode | `Integer` | The status code of the HTTP response. | + +### HTTP/2 Metrics + +Below are the metrics specific to HTTP/2 operations. + +| Name | Type | Description | +|--------------------------|-----------|--------------| +| LocalStreamWindowSize | `Integer` | The local HTTP/2 window size in bytes for the stream that this request was executed on. | +| RemoteStreamWindowSize | `Integer` | The remote HTTP/2 window size in bytes for the stream that this request was executed on. | diff --git a/docs/design/core/metrics/README.md b/docs/design/core/metrics/README.md deleted file mode 100644 index 3f94783c8040..000000000000 --- a/docs/design/core/metrics/README.md +++ /dev/null @@ -1,69 +0,0 @@ -**Design:** New Feature, **Status:** -[In Development](../../../README.md) - -# Project Tenets (unless you know better ones) - -1. Metrics can be used to provide insights about application behavior to enhance performance and debug operational - issues. -2. Enabling default metrics should have minimal impact on the application performance. -3. Customers can publish the collected metrics to their choice of platform. -4. Metrics are divided into different categories for granular control. -5. 
Customers can control the cost by having the ability to enable/disable the metrics collection by category. -6. Metrics collected by SDK are namespaced to avoid collision with other application metrics. - - -# Project Introduction - -This project adds a feature to the AWS SDK for Java that can collect and report client side SDK metrics in your -application. Metrics helps developers, ops engineers to detect and diagnose issues in their applications. The metrics -can also be used to gather insights into the application over time and tune the application for optimal performance. - - -# Project Details - -1. Metrics are disabled by default and should be enabled explicitly by customers. Enabling metrics will introduce small - overhead. -2. Metrics can be enabled quickly during large scale events with need for code change or deployments. -3. Customers may publish metrics using their existing credentials. -4. Metrics are stored and accessed by AWS only with explicit permissions from the customer. -5. New Metrics can be added and published by the SDK into existing categories. - - -# Metrics Meters -Meters define the way a metric is measured. Here are the list of meters: - -**Counter :** Number of times a metric is reported. These kind of metrics can be incremented or decremented. -For example: number of requests made since the start of application - -**Timer :** Records the time between start of an event and end of an event. An example is the time taken (latency) to -complete a request. - -**Gauge :** A value recorded at a point in time. An example is the number of connections in the client pool. - -**Constant Gauge :** There are metrics that have a static value which doesn't change after it is set. Some examples are -service name, API name, status code, request id. 
To support this, a constant implementation of gauge is used - -Reference: Some Meter names are taken from open source -[spectator](http://netflix.github.io/spectator/en/latest/intro/counter/) project (Apache 2.0 license). - -# Naming - -1. Metric names should be in CamelCase format. -2. Only Alphabets and numbers are allowed in metric names. - -## Collected Metrics - -The full list of metrics collected by the SDK are documented [here](MetricsList.md) along with their definitions. - - -# Metric Publishers - -Metric Publishers are the implementations that are used to publish metrics to different platforms. -SDK provides default publishers to publish to following platforms for convenience. -Customers can implement custom publishers to publish metrics to platforms not supported by SDK. - -## Supported platforms -1) CloudWatch - -2) CSM - Client Side Monitoring (also known as [AWS SDK Metrics for Enterprise -Support](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/sdk-metrics.html)) diff --git a/docs/design/core/metrics/prototype/MetricCollection.java b/docs/design/core/metrics/prototype/MetricCollection.java new file mode 100644 index 000000000000..2d65588a2a40 --- /dev/null +++ b/docs/design/core/metrics/prototype/MetricCollection.java @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/** + * An immutable collection of metrics. 
+ */ +public interface MetricCollection extends SdkIterable> { + /** + * @return The name of this metric collection. + */ + String name(); + + /** + * Return all the values of the given metric. An empty list is returned if + * there are no reported values for the given metric. + * + * @param metric The metric. + * @param The type of the value. + * @return All of the values of this metric. + */ + List metricValues(SdkMetric metric); + + /** + * Returns the child metric collections. An empty list is returned if there + * are no children. + * @return The child metric collections. + */ + List children(); + + /** + * Return all of the {@link #children()} with a specific name. + * + * @param name The name by which we will filter {@link #children()}. + * @return The child metric collections that have the provided name. + */ + Stream childrenWithName(String name); + + /** + * @return The time at which this collection was created. + */ + Instant creationTime(); +} diff --git a/docs/design/core/metrics/prototype/MetricCollector.java b/docs/design/core/metrics/prototype/MetricCollector.java new file mode 100644 index 000000000000..7a3c05ae89dd --- /dev/null +++ b/docs/design/core/metrics/prototype/MetricCollector.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/** + * Used to collect metrics collected by the SDK. + *

+ * Collectors are allowed to nest, allowing metrics to be collected within the + * context of other metrics. + */ +@NotThreadSafe +@SdkPublicApi +public interface MetricCollector { + /** + * @return The name of this collector. + */ + String name(); + + /** + * Report a metric. + */ + void reportMetric(SdkMetric metric, T value); + + /** + * + * @param name The name of the child collector. + * @return The child collector. + */ + MetricCollector createChild(String name); + + /** + * Return the collected metrics. The returned {@code MetricCollection} must + * preserve the children of this collector; in other words the tree formed + * by this collector and its children should be identical to the tree formed + * by the returned {@code MetricCollection} and its child collections. + *

+ * Calling {@code collect()} prevents further invocations of {@link + * #reportMetric(SdkMetric, Object)}. + * + * @return The collected metrics. + */ + MetricCollection collect(); + + static MetricCollector create(String name) { + return DefaultMetricCollector.create(name); + } +} diff --git a/docs/design/core/metrics/prototype/MetricPublisher.java b/docs/design/core/metrics/prototype/MetricPublisher.java index 1cb4c5b5a90b..6e2be37b2232 100644 --- a/docs/design/core/metrics/prototype/MetricPublisher.java +++ b/docs/design/core/metrics/prototype/MetricPublisher.java @@ -1,68 +1,45 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.metrics.publisher; - -import java.util.concurrent.CompletableFuture; -import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.metrics.registry.MetricRegistry; - /** - * Interface to report and publish the collected SDK metrics to external sources. - * - * Publisher implementations create and maintain resources (like clients, thread pool etc) that are used for publishing. - * They should be closed in the close() method to avoid resource leakage. - * - *

- * As metrics are not part of the business logic, failures caused by metrics features should not fail the application. - * So SDK publisher implementations suppress all errors during the metrics publishing and log them. - *

- * + * Interface to report and publish the collected SDK metric events to external + * sources. *

- * In certain situations (high throttling errors, metrics are reported faster than publishing etc), storing all the metrics - * might take up lot of memory and can crash the application. In these cases, it is recommended to have a max limit on - * number of metrics stored or memory used for metrics and drop the metrics when the limit is breached. - *

+ * Conceptually, a publisher receives a stream of {@link MetricCollection} + * objects over its lifetime through its {@link #publish(MetricCollection)} + * method. Implementations are then free to further aggregate these events into + * sets of metrics that are then published to some external system for further + * use. As long as a publisher is not closed, then it can receive {@code + * MetricCollection} objects at any time. In addition, as the SDK makes use of + * multithreading, it's possible that the publisher is shared concurrently by + * multiple threads, and necessitates that all implementations are threadsafe. */ +@ThreadSafe @SdkPublicApi -public interface MetricPublisher extends AutoCloseable { - +public interface MetricPublisher extends SdkAutoCloseable { /** - * Registers the metric information supplied in MetricsRegistry. The reported metrics can be transformed and - * stored in a format the publisher uses to publish the metrics. + * Notify the publisher of new metric data. After this call returns, the + * caller can safely discard the given {@code metricCollection} instance if + * it no longer needs it. Implementations are strongly encouraged to + * complete any further aggregation and publishing of metrics in an + * asynchronous manner to avoid blocking the calling thread. + *

+ * With the exception of a {@code null} {@code metricCollection}, all + * invocations of this method must return normally. This is to ensure that + * callers of the publisher can safely assume that even in situations where + * an error happens during publishing that it will not interrupt the calling + * thread. * - * This method is called at the end of each request execution to report all the metrics collected - * for that request (including retry attempt metrics) + * @param metricCollection The collection of metrics. + * @throws IllegalArgumentException If {@code metricCollection} is {@code + * null}. */ - void registerMetrics(MetricRegistry metricsRegistry); + void publish(MetricCollection metricCollection); /** - * Publish all metrics stored in the publisher. If all available metrics cannot be published in a single call, - * multiple calls will be made to publish the metrics. - * - * It is recommended to publish the metrics in a non-blocking way. As it is common to publish metrics to an external - * source which involves network calls, the method is intended to be implemented in a non-blocking way and thus - * returns a {@link CompletableFuture}. - * - * Depending on the implementation, the metrics are published to the external source periodically like: - * a) after a certain time period - * b) after n metrics are registered - * c) after the buffer is full - * - * Implementations can also call publish method for every reported metric. But this can be expensive and - * is not recommended. + * {@inheritDoc} + *

+ * Important: Implementations must block the calling thread until all + * pending metrics are published and any resources acquired have been freed. */ - CompletableFuture publish(); + @Override + void close(); } + diff --git a/docs/design/core/metrics/prototype/MetricRegistry.java b/docs/design/core/metrics/prototype/MetricRegistry.java deleted file mode 100644 index e24a54620531..000000000000 --- a/docs/design/core/metrics/prototype/MetricRegistry.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.metrics.registry; - -import java.util.List; -import java.util.Map; -import java.util.Optional; -import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.metrics.meter.Counter; -import software.amazon.awssdk.metrics.meter.Gauge; -import software.amazon.awssdk.metrics.meter.Metric; -import software.amazon.awssdk.metrics.meter.Timer; - -/** - * Registry to store the collected metrics data. The interface can be used to store metrics for ApiCall and ApiCallAttempt. - * For a ApiCall, there can be multiple attempts and so a MetricRegistry has the option to store other MetricRegistry instances. - */ -@SdkPublicApi -public interface MetricRegistry { - - /** - * Return the ApiCall level metrics registered in this metric registry as a map of metric name to metric instance. 
- * Only metrics that can be recorded once for entire request lifecycle are recorded here. - * - * The method does not return the Api Call Attempt metrics. For metrics recorded separately for each attempt, - * see {@link #apiCallAttemptMetrics()}. - */ - Map getMetrics(); - - - /** - * Return an ordered list of {@link MetricRegistry} instances recorded for each Api Call Attempt in the request execution. - * Each Api call attempt metrics are recorded as a separate {@link MetricRegistry} instance in the given list. - * - * For example, - * If the Api finishes (succeed or fail) in the first attempt, the returned list size will be 1. - * - * If the Api finishes after 4 attempts (1 initial attempt + 3 retries), the returned list size will be 4. In this case, - * The 0th entry in the list has the metrics for the initial attempt, - * The 1st entry in the list has the metrics for the second attempt (1st retry) and so on. - * - * @return an ordered list of {@link MetricRegistry} instances, one for each Api Call Attempt in the request execution - */ - List apiCallAttemptMetrics(); - - /** - * Create and return a new instance of {@link MetricRegistry} for the current ApiCall Attempt. - * Records the registry instance within the class. The instance for the current attempt can be accessed by calling - * the {@link #apiCallAttemptMetrics()} method and getting the last element in the output list. - * - * If the Api Call finishes in the first attempt, this method is only called once. - * If the Api Call finishes after n retry attmpts, this method is called n + 1 times - * (1 time for initial attempt, n times for n retries) - * - * @return a instance of {@link MetricRegistry} to record metrics for a ApiCall Attempt - */ - MetricRegistry registerApiCallAttemptMetrics(); - - /** - * Given a {@link Metric}, registers it under the given name. - * If a metric with given name is already present, method throws {@link IllegalArgumentException}. 
- * - * @param name the name of the metric - * @param metric the metric - * @return the given metric - */ - Metric register(String name, Metric metric); - - /** - * Returns an optional representing the metric registered with the given name. If no metric is registered - * with the given name, an empty optional will be returned. - * - * @param name the name of the metric - * @return an optional representing the metric registered with the given name. - */ - Optional metric(String name); - - /** - * Removes the metric with the given name. - * - * @param name the name of the metric - * @return True if the metric was removed. False is the metric doesn't exist or cannot be removed - */ - boolean remove(String name); - - /** - * Return the {@link Counter} registered under this name. - * If there is none registered already, create and register a new {@link Counter}. - * - * @param name name of the metric - * @return a new or pre-existing {@link Counter} - */ - Counter counter(String name); - - /** - * Return the {@link Timer} registered under this name. - * If there is none registered already, create and register a new {@link Timer}. - * - * @param name name of the metric - * @return a new or pre-existing {@link Timer} - */ - Timer timer(String name); - - /** - * Return a {@link Gauge} registered under this name and updates its value with #value. - * If there is none registered already, create and register a new {@link Gauge} with the given initial #value. - * - * @param name name of the metric - * @param value initial value of the guage - * @param type of the value - * @return a new or pre-existing {@link Gauge} with updated value - */ - Gauge gauge(String name, T value); -} diff --git a/docs/design/core/metrics/prototype/SdkMetric.java b/docs/design/core/metrics/prototype/SdkMetric.java new file mode 100644 index 000000000000..6db104123882 --- /dev/null +++ b/docs/design/core/metrics/prototype/SdkMetric.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/** + * A specific SDK metric. + * + * @param The type for values of this metric. + */ +@SdkPublicApi +public interface SdkMetric { + + /** + * @return The name of this metric. + */ + public String name(); + + /** + * @return The categories of this metric. + */ + public Set categories(); + + /** + * @return The level of this metric. + */ + MetricLevel level(); + + /** + * @return The class of the value associated with this metric. + */ + public Class valueClass(); + + /** + * Cast the given object to the value class associated with this event. + * + * @param o The object. + * @return The cast object. + * @throws ClassCastException If {@code o} is not an instance of type {@code + * T}. + */ + public T convertValue(Object o); +} diff --git a/docs/design/core/waiters/README.md b/docs/design/core/waiters/README.md new file mode 100644 index 000000000000..32fb0ff880da --- /dev/null +++ b/docs/design/core/waiters/README.md @@ -0,0 +1,593 @@ +**Design:** New Feature, **Status:** [Proposed](../../README.md) + +# Waiters + +"Waiters" are an abstraction used to poll a resource until a desired state is reached or until it is determined that +the resource will never enter into the desired state. This feature is supported in the AWS Java SDK 1.x and this document proposes +how waiters should be implemented in the Java SDK 2.x. 
+ +## Introduction + +A waiter makes it easier for customers to wait for a resource to transition into a desired state. It comes in handy when customers are +interacting with operations that are eventually consistent on the service side. + +For example, when you invoke `dynamodb#createTable`, the service immediately returns a response with a TableStatus of `CREATING` +and the table will not be available to perform write or read until the status has transitioned to `ACTIVE`. Waiters can be used to help +you handle the task of waiting for the table to become available. + +## Proposed APIs + +The SDK 2.x will support both sync and async waiters for service clients that have waiter-eligible operations. It will also provide a generic `Waiter` class +which makes it possible for customers to customize the polling function, define expected success, failure and retry conditions as well as configurations such as `maxAttempts`. + +### Usage Examples + +#### Example 1: Using sync waiters + +- instantiate a waiter object from an existing service client + +```Java +DynamoDbClient client = DynamoDbClient.create(); +DynamoDbWaiter waiter = client.waiter(); + +WaiterResponse<DescribeTableResponse> response = waiter.waitUntilTableExists(b -> b.tableName("table")); +``` + +- instantiate a waiter object from builder + +```java +DynamoDbWaiter waiter = DynamoDbWaiter.builder() + .client(client) + .pollingStrategy(p -> p.maxAttempts(10)) + .build(); + +WaiterResponse<DescribeTableResponse> response = waiter.waitUntilTableExists(b -> b.tableName("table")); + +``` + +#### Example 2: Using async waiters + +- instantiate a waiter object from an existing service client + +```Java +DynamoDbAsyncClient asyncClient = DynamoDbAsyncClient.create(); +DynamoDbAsyncWaiter waiter = asyncClient.waiter(); + +CompletableFuture<WaiterResponse<DescribeTableResponse>> responseFuture = waiter.waitUntilTableExists(b -> b.tableName("table")); + +``` + +- instantiate a waiter object from builder + +```java +DynamoDbAsyncWaiter waiter = DynamoDbAsyncWaiter.builder() + .client(asyncClient) + 
 .pollingStrategy(p -> p.maxAttempts(10)) + .build(); + +CompletableFuture<WaiterResponse<DescribeTableResponse>> responseFuture = waiter.waitUntilTableExists(b -> b.tableName("table")); +``` + + +*FAQ Below: "Why not create waiter operations directly on the client?"* + +#### Example 3: Using the generic waiter + +```Java +Waiter<DescribeTableResponse> waiter = + Waiter.builder(DescribeTableResponse.class) + .addAcceptor(WaiterAcceptor.successAcceptor(r -> r.table().tableStatus().equals(TableStatus.ACTIVE))) + .addAcceptor(WaiterAcceptor.retryAcceptor(t -> t instanceof ResourceNotFoundException)) + .addAcceptor(WaiterAcceptor.errorAcceptor(t -> t instanceof InternalServerErrorException)) + .pollingStrategy(p -> p.maxAttempts(20).backoffStrategy(BackoffStrategy.defaultStrategy())) + .build(); + +// run synchronously +WaiterResponse<DescribeTableResponse> response = waiter.run(() -> client.describeTable(describeTableRequest)); + +// run asynchronously +CompletableFuture<WaiterResponse<DescribeTableResponse>> responseFuture = + waiter.runAsync(() -> asyncClient.describeTable(describeTableRequest)); +``` + +### `{Service}Waiter` and `{Service}AsyncWaiter` + +Two classes will be created for each waiter-eligible service: `{Service}Waiter` and `{Service}AsyncWaiter` (e.g. `DynamoDbWaiter`, `DynamoDbAsyncWaiter`). +This follows the naming strategy established by the current `{Service}Client` and `{Service}Utilities` classes. + +#### Example + +```Java +/** + * Waiter utility class that waits for a resource to transition to the desired state. + */ +@SdkPublicApi +@Generated("software.amazon.awssdk:codegen") +public interface DynamoDbWaiter { + + /** + * Poller method that waits for the table status to transition to ACTIVE by + * invoking {@link DynamoDbClient#describeTable}. It returns when the resource enters into a desired state or + * it is determined that the resource will never enter into the desired state. + * + * @param describeTableRequest Represents the input of a DescribeTable operation.
+ * @return {@link DescribeTableResponse} + */ + default WaiterResponse waitUntilTableExists(DescribeTableRequest describeTableRequest) { + throw new UnsupportedOperationException(); + } + + default WaiterResponse waitUntilTableExists(Consumer describeTableRequest) { + return waitUntilTableExists(DescribeTableRequest.builder().applyMutation(describeTableRequest).build()); + } + + /** + * Poller method that waits until the table does not exists by invoking {@link DynamoDbClient#describeTable}. + * It returns when the resource enters into a desired state or it is determined that the resource will never enter into the desired state. + * + * @param describeTableRequest Represents the input of a DescribeTable operation. + */ + default WaiterResponse waitUntilTableNotExists(DescribeTableRequest describeTableRequest) { + throw new UnsupportedOperationException(); + } + + default void waitUntilTableNotExists(Consumer describeTableRequest) { + return waitUntilTableNotExists(DescribeTableRequest.builder().applyMutation(describeTableRequest).build()); + } + + interface Builder { + + Builder client(DynamoDbClient client); + + /** + * Define the {@link PollingStrategy} that computes the delay before the next retry request. + * @param backoffStrategy the backoff strategy + * @return the chained builder + */ + Builder pollingStrategy(PollingStrategy backoffStrategy); + + DynamoDbWaiter build(); + } +} + +/** + * Waiter utility class that waits for a resource to transition to the desired state asynchronously. + */ +@SdkPublicApi +@Generated("software.amazon.awssdk:codegen") +public interface DynamoDbAsyncWaiter extends ServiceWaiter { + + /** + * Poller method that waits for the table status to transition to ACTIVE by + * invoking {@link DynamoDbClient#describeTable}. It returns when the resource enters into a desired state or + * it is determined that the resource will never enter into the desired state. 
+ * + * @param describeTableRequest Represents the input of a DescribeTable operation. + * @return A CompletableFuture containing the result of the DescribeTable operation returned by the service. It completes + * successfully when the resource enters into a desired state or it completes exceptionally when it is determined that the + * resource will never enter into the desired state. + */ + default CompletableFuture> waitUntilTableExists(DescribeTableRequest describeTableRequest) { + throw new UnsupportedOperationException(); + } + + default CompletableFuture> waitUntilTableExists(Consumer describeTableRequest) { + return waitUntilTableExists(DescribeTableRequest.builder().applyMutation(describeTableRequest).build()); + } + + /** + * Poller method that waits until the table does not exists by invoking {@link DynamoDbClient#describeTable}. + * It returns when the resource enters into a desired state or it is determined that the resource will never enter into the desired state. + * + * @param describeTableRequest Represents the input of a DescribeTable operation. + * @return A CompletableFuture containing the result of the DescribeTable operation returned by the service. It completes + * successfully when the resource enters into a desired state or it completes exceptionally when it is determined that the + * resource will never enter into the desired state. + */ + default CompletableFuture> waitUntilTableNotExists(DescribeTableRequest describeTableRequest) { + throw new UnsupportedOperationException(); + } + + default CompletableFuture> waitUntilTableNotExists(Consumer describeTableRequest) { + return waitUntilTableNotExists(DescribeTableRequest.builder().applyMutation(describeTableRequest).build()); + } + + interface Builder { + + Builder client(DynamoDbAsyncClient client); + + /** + * Define the {@link PollingStrategy} that computes the delay before the next retry request. 
+ * @param backoffStrategy the backoff strategy + * @return the chained builder + */ + Builder pollingStrategy(PollingStrategy backoffStrategy); + + DynamoDbAsyncWaiter build(); + } + +} +``` + +*FAQ Below: "Why returning a WaiterResponse wrapper class"*. + +#### Instantiation + +This class can be instantiated from an existing service client or builder + +- from an existing service client + +```Java +// sync waiter +DynamoDbClient dynamo = DynamoDbClient.create(); +DynamoDbWaiter dynamoWaiter = dynamo.waiter(); + +// async waiter +DynamoDbClient dynamoAsync = DynamoDbAsyncClient.create(); +DynamoDbAsyncWaiter dynamoAsyncWaiter = dynamoAsync.waiter(); +``` + +- from waiter builder + +```java +// sync waiter +DynamodbWaiter waiter = DynamoDbWaiter.builder() + .client(client) + .pollingStrategy(p -> p.maxAttempts(10)) + .build(); + + +// async waiter +DynamoDbAsyncWaiter asyncWaiter = DynamoDbAsyncWaiter.builder() + .client(asyncClient) + .pollingStrategy(p -> p.maxAttempts(10)) + .build(); + + +``` + + +#### Methods + +A method will be generated for each operation that needs waiter support. There are two categories depending on the expected success state. 
+ + - sync: `WaiterResponse<{Operation}Response> waitUntil{DesiredState}({Operation}Request)` + ```java + WaiterResponse waitUntilTableExists(DescribeTableRequest describeTableRequest) + ``` + - async: `CompletableFuture> waitUntil{DesiredState}({Operation}Request)` + ```java + CompletableFuture> waitUntilTableExists(DescribeTableRequest describeTableRequest) + ``` + +### `WaiterResponse` +```java +/** + * The response returned from a waiter operation + * @param the type of the response + */ +@SdkPublicApi +public interface WaiterResponse { + + /** + * @return the response received that has matched with the waiter success condition + */ + Optional response(); + + + /** + * @return the optional exception that has matched with the waiter success condition + */ + Optional exception(); + +} +``` + +*FAQ Below: "Why making response and exception optional"*. + +### `Waiter` + +The generic `Waiter` class enables users to customize waiter configurations and provide their own `WaiterAcceptor`s which define the expected states and controls the terminal state of the waiter. + +#### Methods + +```java +@SdkPublicApi +public interface Waiter { + + /** + * Runs the provided polling function. It completes when the resource enters into a desired state or + * it is determined that the resource will never enter into the desired state. + * + * @param asyncPollingFunction the polling function to trigger + * @return A CompletableFuture containing the result of the DescribeTable operation returned by the service. It completes + * successfully when the resource enters into a desired state or it completes exceptionally when it is determined that the + * resource will never enter into the desired state. + */ + CompletableFuture> runAsync(Supplier> asyncPollingFunction); + + /** + * It returns when the resource enters into a desired state or + * it is determined that the resource will never enter into the desired state. 
+ * + * @param pollingFunction the polling function to trigger + * @return the response + */ + WaiterResponse run(Supplier pollingFunction); + + + /** + * Creates a newly initialized builder for the waiter object. + * + * @param responseClass the response class + * @param the type of the response + * @return a Waiter builder + */ + static Builder builder(Class responseClass) { + return DefaultWaiter.builder(); + } +} +``` + +#### Inner-Class: `Waiter.Builder` + +```java + public interface Builder { + + /** + * Defines a list of {@link WaiterAcceptor}s to check if an expected state has been met after executing an operation. + * + * @param waiterAcceptors the waiter acceptors + * @return the chained builder + */ + Builder acceptors(List> waiterAcceptors); + + /** + * Add a {@link WaiterAcceptor} + * + * @param waiterAcceptors the waiter acceptors + * @return the chained builder + */ + Builder addAcceptor(WaiterAcceptor waiterAcceptors); + + /** + * Defines a {@link PollingStrategy} to use when polling the resource + * + * @param pollingStrategy the polling strategy to use + * @return a reference to this object so that method calls can be chained together. + */ + Builder pollingStrategy(PollingStrategy pollingStrategy); + + /** + * Define the {@link ScheduledExecutorService} used to schedule async attempts + * + * @param scheduledExecutorService the scheduled executor service + * @return the chained builder + */ + Builder scheduledExecutorService(ScheduledExecutorService scheduledExecutorService); + } +``` + +#### `PollingStrategy` + +PollingStrategy specifies how the waiter polls the resources. + +```java +public interface PollingStrategy { + + /** + * Define the maximum number of attempts to try before transitioning the waiter to a failure state. + * @return a reference to this object so that method calls can be chained together.
+ */ + int maxAttempts(); + + /** + * Define the {@link BackoffStrategy} that computes the delay before the next retry request. + * + * @return a reference to this object so that method calls can be chained together. + */ + BackoffStrategy backoffStrategy(); +} + +``` + +### `WaiterState` + +`WaiterState` is an enum that defines possible states of a waiter to be transitioned to if a condition is met + +```java +public enum WaiterState { + /** + * Indicates the waiter succeeded and must no longer continue waiting. + */ + SUCCESS, + + /** + * Indicates the waiter failed and must not continue waiting. + */ + FAILURE, + + /** + * Indicates that the waiter encountered an expected failure case and should retry if possible. + */ + RETRY +} +``` + +### `WaiterAcceptor` + +`WaiterAcceptor` is a class that inspects the response or error returned from the operation and determines whether an expected condition +is met and indicates the next state that the waiter should be transitioned to if there is a match. 
+ +```java +@SdkPublicApi +public interface WaiterAcceptor { + + /** + * @return the next {@link WaiterState} that the waiter should be transitioned to if this acceptor matches with the response or error + */ + WaiterState waiterState(); + + /** + * Check to see if the response matches with the expected state defined by the acceptor + * + * @param response the response to inspect + * @return whether it accepts the response + */ + default boolean matches(T response) { + return false; + } + + /** + * Check to see if the exception matches with the expected state defined by the acceptor + * + * @param throwable the exception to inspect + * @return whether it accepts the throwable + */ + default boolean matches(Throwable throwable) { + return false; + } + + /** + * Creates a success waiter acceptor which determines if the response matches with the success state + * + * @param responsePredicate + * @param the response type + * @return a {@link WaiterAcceptor} + */ + static WaiterAcceptor successAcceptor(Predicate responsePredicate) { + return new WaiterAcceptor() { + @Override + public WaiterState waiterState() { + return WaiterState.SUCCESS; + } + + @Override + public boolean matches(T response) { + return responsePredicate.test(response); + } + }; + } + + /** + * Creates an error waiter acceptor which determines if the exception should transition the waiter to failure state + * + * @param errorPredicate + * @param the response type + * @return a {@link WaiterAcceptor} + */ + static WaiterAcceptor errorAcceptor(Predicate errorPredicate) { + return new WaiterAcceptor() { + @Override + public WaiterState waiterState() { + return WaiterState.FAILURE; + } + + @Override + public boolean matches(Throwable t) { + return errorPredicate.test(t); + } + }; + } + + /** + * Creates a retry waiter acceptor which determines if the exception should transition the waiter to retry state + * + * @param errorPredicate + * @param the response type + * @return a {@link WaiterAcceptor} + */ + 
static WaiterAcceptor retryAcceptor(Predicate errorPredicate) { + return new WaiterAcceptor() { + @Override + public WaiterState waiterState() { + return WaiterState.RETRY; + } + + @Override + public boolean matches(Throwable t) { + return errorPredicate.test(t); + } + }; + } +} +``` + +## FAQ + +### For which services will we generate waiters? + +We will generate a `{Service}Waiter` class if the service has any operations that need waiter support. + +### Why not create waiter operations directly on the client? + +The options are: (1) create separate waiter utility classes or (2) create waiter operations on the client + +The following compares Option 1 to Option 2, in the interest of illustrating why Option 1 was chosen. + +**Option 1:** create separate waiter utility classes + +```Java +dynamodb.waiter().waitUntilTableExists(describeTableRequest) +``` + +**Option 2:** create waiter operations on each service client + +```Java +dynamodb.waitUntilTableExists(describeTableRequest) +``` + +**Option 1 Pros:** + +1. consistent with existing s3 utilities and presigner method approach, eg: s3Client.utilities() +2. similar api to v1 waiter, and it might be easier for customers who are already using v1 waiter to migrate to v2. + +**Option 2 Pros:** + +1. slightly better discoverability + +**Decision:** Option 1 will be used, because it is consistent with existing features and Option 2 might bloat the size +of the client, making it more difficult to use. + +### Why returning `WaiterResponse`? + +For waiter operations that await a resource to be created, the last successful response sometimes contains important metadata such as resourceId, which is often required for customers to perform other actions with the resource. Without returning the response, customers will have to send an extra request to retrieve the response. This is a [feature request](https://github.com/aws/aws-sdk-java/issues/815) from v1 waiter implementation. 
+ +For waiter operations that treat a specific exception as the success state, some customers might still want to access the exception to retrieve the requestId or raw response. + +A `WaiterResponse` wrapper class is created to provide either the response or exception depending on what triggers the waiter to reach the desired state. It also provides flexibility to add more metadata such as `attemptExecuted` in the future if needed. + + +### Why making response and exception optional in `WaiterResponse`? + +Per the SDK's style guideline `UseOfOptional`, + +> `Optional` should be used when it isn't obvious to a caller whether a result will be null. + +we make `response` and `exception` optional in `WaiterResponse` because only one of them can be present and it cannot be determined which is present at compile time. + +The following example shows how to retrieve a response from `WaiterResponse` + +```java +waiterResponse.response().ifPresent(r -> ...); + +``` + +Another approach is to create a flag field, say `isResponseAvailable`, to indicate if the response is null or not. Customers can check this before accessing `response` to avoid NPE. + +```java +if (waiterResponse.isResponseAvailable()) { + DescribeTableResponse response = waiterResponse.response(); + ... +} + +``` + +The issue with this approach is that `isResponseAvailable` might not be discovered by customers when they access `WaiterResponse` and they'll have to add a null pointer check, otherwise they will end up getting NPEs. It also violates our guideline for the use of optional. 
+ +## References + +Github feature request links: +- [Waiters](https://github.com/aws/aws-sdk-java-v2/issues/24) +- [Async requests that complete when the operation is complete](https://github.com/aws/aws-sdk-java-v2/issues/286) + diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 2e0abdc2aacf..a5c249aedac9 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface @@ -50,6 +50,11 @@ utils ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + org.reactivestreams reactive-streams diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullRequest.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullRequest.java index 2ac33c614b3c..0a45e36f660f 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullRequest.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.http; +import static software.amazon.awssdk.utils.CollectionUtils.deepCopyMap; import static software.amazon.awssdk.utils.CollectionUtils.deepUnmodifiableMap; import java.util.ArrayList; @@ -53,10 +54,15 @@ private DefaultSdkHttpFullRequest(Builder builder) { this.host = Validate.paramNotNull(builder.host, "host"); this.port = standardizePort(builder.port); this.path = standardizePath(builder.path); - this.queryParameters = deepUnmodifiableMap(builder.queryParameters, () -> new LinkedHashMap<>()); this.httpMethod = Validate.paramNotNull(builder.httpMethod, "method"); - this.headers = deepUnmodifiableMap(builder.headers, () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); this.contentStreamProvider = builder.contentStreamProvider; + + this.queryParameters = builder.queryParametersAreFromToBuilder + ? 
builder.queryParameters + : deepUnmodifiableMap(builder.queryParameters, () -> new LinkedHashMap<>()); + this.headers = builder.headersAreFromToBuilder + ? builder.headers + : deepUnmodifiableMap(builder.headers, () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); } private String standardizeProtocol(String protocol) { @@ -139,16 +145,7 @@ public Optional contentStreamProvider() { @Override public SdkHttpFullRequest.Builder toBuilder() { - return new Builder() - .contentStreamProvider(contentStreamProvider) - .protocol(protocol) - .host(host) - .port(port) - .encodedPath(path) - .rawQueryParameters(queryParameters) - .method(httpMethod) - .headers(headers) - ; + return new Builder(this); } @Override @@ -172,12 +169,35 @@ static final class Builder implements SdkHttpFullRequest.Builder { private String host; private Integer port; private String path; - private Map> queryParameters = new LinkedHashMap<>(); + + private boolean queryParametersAreFromToBuilder; + private Map> queryParameters; + private SdkHttpMethod httpMethod; - private Map> headers = new LinkedHashMap<>(); + + private boolean headersAreFromToBuilder; + private Map> headers; + private ContentStreamProvider contentStreamProvider; Builder() { + queryParameters = new LinkedHashMap<>(); + queryParametersAreFromToBuilder = false; + headers = new LinkedHashMap<>(); + headersAreFromToBuilder = false; + } + + Builder(DefaultSdkHttpFullRequest request) { + queryParameters = request.queryParameters; + queryParametersAreFromToBuilder = true; + headers = request.headers; + headersAreFromToBuilder = true; + protocol = request.protocol; + host = request.host; + port = request.port; + path = request.path; + httpMethod = request.httpMethod; + contentStreamProvider = request.contentStreamProvider; } @Override @@ -226,12 +246,14 @@ public String encodedPath() { @Override public DefaultSdkHttpFullRequest.Builder putRawQueryParameter(String paramName, List paramValues) { + copyQueryParamsIfNeeded(); 
this.queryParameters.put(paramName, new ArrayList<>(paramValues)); return this; } @Override public SdkHttpFullRequest.Builder appendRawQueryParameter(String paramName, String paramValue) { + copyQueryParamsIfNeeded(); this.queryParameters.computeIfAbsent(paramName, k -> new ArrayList<>()).add(paramValue); return this; } @@ -239,24 +261,34 @@ public SdkHttpFullRequest.Builder appendRawQueryParameter(String paramName, Stri @Override public DefaultSdkHttpFullRequest.Builder rawQueryParameters(Map> queryParameters) { this.queryParameters = CollectionUtils.deepCopyMap(queryParameters, () -> new LinkedHashMap<>()); + queryParametersAreFromToBuilder = false; return this; } @Override public Builder removeQueryParameter(String paramName) { + copyQueryParamsIfNeeded(); this.queryParameters.remove(paramName); return this; } @Override public Builder clearQueryParameters() { - this.queryParameters.clear(); + this.queryParameters = new LinkedHashMap<>(); + queryParametersAreFromToBuilder = false; return this; } + private void copyQueryParamsIfNeeded() { + if (queryParametersAreFromToBuilder) { + queryParametersAreFromToBuilder = false; + this.queryParameters = deepCopyMap(queryParameters); + } + } + @Override public Map> rawQueryParameters() { - return CollectionUtils.deepUnmodifiableMap(this.queryParameters, () -> new LinkedHashMap<>()); + return CollectionUtils.unmodifiableMapOfLists(queryParameters); } @Override @@ -272,12 +304,14 @@ public SdkHttpMethod method() { @Override public DefaultSdkHttpFullRequest.Builder putHeader(String headerName, List headerValues) { + copyHeadersIfNeeded(); this.headers.put(headerName, new ArrayList<>(headerValues)); return this; } @Override public SdkHttpFullRequest.Builder appendHeader(String headerName, String headerValue) { + copyHeadersIfNeeded(); this.headers.computeIfAbsent(headerName, k -> new ArrayList<>()).add(headerValue); return this; } @@ -285,24 +319,34 @@ public SdkHttpFullRequest.Builder appendHeader(String headerName, String 
headerV @Override public DefaultSdkHttpFullRequest.Builder headers(Map> headers) { this.headers = CollectionUtils.deepCopyMap(headers); + headersAreFromToBuilder = false; return this; } @Override public SdkHttpFullRequest.Builder removeHeader(String headerName) { + copyHeadersIfNeeded(); this.headers.remove(headerName); return this; } @Override public SdkHttpFullRequest.Builder clearHeaders() { - this.headers.clear(); + this.headers = new LinkedHashMap<>(); + headersAreFromToBuilder = false; return this; } @Override public Map> headers() { - return CollectionUtils.deepUnmodifiableMap(this.headers); + return CollectionUtils.unmodifiableMapOfLists(this.headers); + } + + private void copyHeadersIfNeeded() { + if (headersAreFromToBuilder) { + headersAreFromToBuilder = false; + this.headers = deepCopyMap(headers); + } } @Override diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullResponse.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullResponse.java index 2cf8861303fd..b3cf30902668 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullResponse.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/DefaultSdkHttpFullResponse.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.http; +import static software.amazon.awssdk.utils.CollectionUtils.deepCopyMap; import static software.amazon.awssdk.utils.CollectionUtils.deepUnmodifiableMap; import java.io.Serializable; @@ -47,7 +48,9 @@ class DefaultSdkHttpFullResponse implements SdkHttpFullResponse, Serializable { private DefaultSdkHttpFullResponse(Builder builder) { this.statusCode = Validate.isNotNegative(builder.statusCode, "Status code must not be negative."); this.statusText = builder.statusText; - this.headers = deepUnmodifiableMap(builder.headers, () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); + this.headers = builder.headersAreFromToBuilder + ? 
builder.headers + : deepUnmodifiableMap(builder.headers, () -> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); this.content = builder.content; } @@ -83,16 +86,21 @@ static final class Builder implements SdkHttpFullResponse.Builder { private String statusText; private int statusCode; private AbortableInputStream content; - private Map> headers = new LinkedHashMap<>(); + + private boolean headersAreFromToBuilder; + private Map> headers; Builder() { + headersAreFromToBuilder = false; + headers = new LinkedHashMap<>(); } private Builder(DefaultSdkHttpFullResponse defaultSdkHttpFullResponse) { - this.statusText = defaultSdkHttpFullResponse.statusText; - this.statusCode = defaultSdkHttpFullResponse.statusCode; - this.content = defaultSdkHttpFullResponse.content; - this.headers = CollectionUtils.deepCopyMap(defaultSdkHttpFullResponse.headers); + statusText = defaultSdkHttpFullResponse.statusText; + statusCode = defaultSdkHttpFullResponse.statusCode; + content = defaultSdkHttpFullResponse.content; + headersAreFromToBuilder = true; + headers = defaultSdkHttpFullResponse.headers; } @Override @@ -132,6 +140,7 @@ public Builder content(AbortableInputStream content) { public Builder putHeader(String headerName, List headerValues) { Validate.paramNotNull(headerName, "headerName"); Validate.paramNotNull(headerValues, "headerValues"); + copyHeadersIfNeeded(); this.headers.put(headerName, new ArrayList<>(headerValues)); return this; } @@ -140,6 +149,7 @@ public Builder putHeader(String headerName, List headerValues) { public SdkHttpFullResponse.Builder appendHeader(String headerName, String headerValue) { Validate.paramNotNull(headerName, "headerName"); Validate.paramNotNull(headerValue, "headerValue"); + copyHeadersIfNeeded(); this.headers.computeIfAbsent(headerName, k -> new ArrayList<>()).add(headerValue); return this; } @@ -148,24 +158,34 @@ public SdkHttpFullResponse.Builder appendHeader(String headerName, String header public Builder headers(Map> headers) { 
Validate.paramNotNull(headers, "headers"); this.headers = CollectionUtils.deepCopyMap(headers); + headersAreFromToBuilder = false; return this; } @Override public Builder removeHeader(String headerName) { + copyHeadersIfNeeded(); this.headers.remove(headerName); return this; } @Override public Builder clearHeaders() { - this.headers.clear(); + this.headers = new LinkedHashMap<>(); + headersAreFromToBuilder = false; return this; } @Override public Map> headers() { - return deepUnmodifiableMap(this.headers); + return CollectionUtils.unmodifiableMapOfLists(this.headers); + } + + private void copyHeadersIfNeeded() { + if (headersAreFromToBuilder) { + headersAreFromToBuilder = false; + this.headers = deepCopyMap(headers); + } } /** diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/Header.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/Header.java index 10ace60280d5..4e23000e408f 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/Header.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/Header.java @@ -33,6 +33,12 @@ public final class Header { public static final String CHUNKED = "chunked"; + public static final String HOST = "Host"; + + public static final String CONNECTION = "Connection"; + + public static final String KEEP_ALIVE_VALUE = "keep-alive"; + private Header() { } diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/Http2Metric.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/Http2Metric.java new file mode 100644 index 000000000000..4c8a5eb719fc --- /dev/null +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/Http2Metric.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +/** + * Metrics collected by HTTP clients for HTTP/2 operations. See {@link HttpMetric} for metrics that are available on both HTTP/1 + * and HTTP/2 operations. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public final class Http2Metric { + /** + * The local HTTP/2 window size in bytes for the stream that this request was executed on. + * + *

See https://http2.github.io/http2-spec/#FlowControl for more information on HTTP/2 window sizes. + */ + public static final SdkMetric LOCAL_STREAM_WINDOW_SIZE_IN_BYTES = + metric("LocalStreamWindowSize", Integer.class, MetricLevel.TRACE); + + /** + * The remote HTTP/2 window size in bytes for the stream that this request was executed on. + * + *

See https://http2.github.io/http2-spec/#FlowControl for more information on HTTP/2 window sizes. + */ + public static final SdkMetric REMOTE_STREAM_WINDOW_SIZE_IN_BYTES = + metric("RemoteStreamWindowSize", Integer.class, MetricLevel.TRACE); + + private Http2Metric() { + } + + private static SdkMetric metric(String name, Class clzz, MetricLevel level) { + return SdkMetric.create(name, clzz, level, MetricCategory.CORE, MetricCategory.HTTP_CLIENT); + } +} diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpExecuteRequest.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpExecuteRequest.java index c814a4f5cebd..9fd8b25c2784 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpExecuteRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpExecuteRequest.java @@ -17,6 +17,7 @@ import java.util.Optional; import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.MetricCollector; /** * Request object containing the parameters necessary to make a synchronous HTTP request. @@ -28,10 +29,12 @@ public final class HttpExecuteRequest { private final SdkHttpRequest request; private final Optional contentStreamProvider; + private final MetricCollector metricCollector; private HttpExecuteRequest(BuilderImpl builder) { this.request = builder.request; this.contentStreamProvider = builder.contentStreamProvider; + this.metricCollector = builder.metricCollector; } /** @@ -48,6 +51,13 @@ public Optional contentStreamProvider() { return contentStreamProvider; } + /** + * @return The {@link MetricCollector}. 
+ */ + public Optional metricCollector() { + return Optional.ofNullable(metricCollector); + } + public static Builder builder() { return new BuilderImpl(); } @@ -68,12 +78,22 @@ public interface Builder { */ Builder contentStreamProvider(ContentStreamProvider contentStreamProvider); + /** + * Set the {@link MetricCollector} to be used by the HTTP client to + * report metrics collected for this request. + * + * @param metricCollector The metric collector. + * @return This builder for method chaining. + */ + Builder metricCollector(MetricCollector metricCollector); + HttpExecuteRequest build(); } private static class BuilderImpl implements Builder { private SdkHttpRequest request; private Optional contentStreamProvider = Optional.empty(); + private MetricCollector metricCollector; @Override public Builder request(SdkHttpRequest request) { @@ -87,6 +107,12 @@ public Builder contentStreamProvider(ContentStreamProvider contentStreamProvider return this; } + @Override + public Builder metricCollector(MetricCollector metricCollector) { + this.metricCollector = metricCollector; + return this; + } + @Override public HttpExecuteRequest build() { return new HttpExecuteRequest(this); diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpMetric.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpMetric.java new file mode 100644 index 000000000000..40703b10306b --- /dev/null +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/HttpMetric.java @@ -0,0 +1,117 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http; + +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; + +/** + * Metrics collected by HTTP clients for HTTP/1 and HTTP/2 operations. See {@link Http2Metric} for metrics that are only available + * on HTTP/2 operations. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@SdkPublicApi +public final class HttpMetric { + /** + * The name of the HTTP client. + */ + public static final SdkMetric HTTP_CLIENT_NAME = + metric("HttpClientName", String.class, MetricLevel.INFO); + + /** + * The maximum number of concurrent requests that is supported by the HTTP client. + * + *

For HTTP/1 operations, this is equal to the maximum number of TCP connections that can be be pooled by the HTTP client. + * For HTTP/2 operations, this is equal to the maximum number of streams that can be pooled by the HTTP client. + * + *

Note: Depending on the HTTP client, this is either a value for all endpoints served by the HTTP client, or a value + * that applies only to the specific endpoint/host used in the request. For 'apache-http-client', this value is + * for the entire HTTP client. For 'netty-nio-client', this value is per-endpoint. In all cases, this value is scoped to an + * individual HTTP client instance, and does not include concurrency that may be available in other HTTP clients running + * within the same JVM. + */ + public static final SdkMetric MAX_CONCURRENCY = + metric("MaxConcurrency", Integer.class, MetricLevel.INFO); + + /** + * The number of additional concurrent requests that can be supported by the HTTP client without needing to establish + * additional connections to the target server. + * + *

For HTTP/1 operations, this is equal to the number of TCP connections that have been established with the service, + * but are currently idle/unused. For HTTP/2 operations, this is equal to the number of streams that are currently + * idle/unused. + * + *

Note: Depending on the HTTP client, this is either a value for all endpoints served by the HTTP client, or a value + * that applies only to the specific endpoint/host used in the request. For 'apache-http-client', this value is + * for the entire HTTP client. For 'netty-nio-client', this value is per-endpoint. In all cases, this value is scoped to an + * individual HTTP client instance, and does not include concurrency that may be available in other HTTP clients running + * within the same JVM. + */ + public static final SdkMetric AVAILABLE_CONCURRENCY = + metric("AvailableConcurrency", Integer.class, MetricLevel.INFO); + + /** + * The number of requests that are currently being executed by the HTTP client. + * + *

For HTTP/1 operations, this is equal to the number of TCP connections currently in active communication with the service + * (excluding idle connections). For HTTP/2 operations, this is equal to the number of HTTP streams currently in active + * communication with the service (excluding idle stream capacity). + * + *

Note: Depending on the HTTP client, this is either a value for all endpoints served by the HTTP client, or a value + * that applies only to the specific endpoint/host used in the request. For 'apache-http-client', this value is + * for the entire HTTP client. For 'netty-nio-client', this value is per-endpoint. In all cases, this value is scoped to an + * individual HTTP client instance, and does not include concurrency that may be available in other HTTP clients running + * within the same JVM. + */ + public static final SdkMetric LEASED_CONCURRENCY = + metric("LeasedConcurrency", Integer.class, MetricLevel.INFO); + + /** + * The number of requests that are awaiting concurrency to be made available from the HTTP client. + * + *

For HTTP/1 operations, this is equal to the number of requests currently blocked, waiting for a TCP connection to be + * established or returned from the connection pool. For HTTP/2 operations, this is equal to the number of requests currently + * blocked, waiting for a new stream (and possibly a new HTTP/2 connection) from the connection pool. + * + *

Note: Depending on the HTTP client, this is either a value for all endpoints served by the HTTP client, or a value + * that applies only to the specific endpoint/host used in the request. For 'apache-http-client', this value is + * for the entire HTTP client. For 'netty-nio-client', this value is per-endpoint. In all cases, this value is scoped to an + * individual HTTP client instance, and does not include concurrency that may be available in other HTTP clients running + * within the same JVM. + */ + public static final SdkMetric PENDING_CONCURRENCY_ACQUIRES = + metric("PendingConcurrencyAcquires", Integer.class, MetricLevel.INFO); + + /** + * The status code of the HTTP response. + * + * @implSpec This is reported by the SDK core, and should not be reported by an individual HTTP client implementation. + */ + public static final SdkMetric HTTP_STATUS_CODE = + metric("HttpStatusCode", Integer.class, MetricLevel.TRACE); + + private HttpMetric() { + } + + private static SdkMetric metric(String name, Class clzz, MetricLevel level) { + return SdkMetric.create(name, clzz, level, MetricCategory.CORE, MetricCategory.HTTP_CLIENT); + } +} diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpResponse.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpResponse.java index 157c6ff6a03e..61e02e528d78 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpResponse.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpResponse.java @@ -91,7 +91,7 @@ interface Builder extends CopyableBuilder { Builder statusCode(int statusCode); /** - * The query parameters, exactly as they were configured with {@link #headers(Map)}, + * The HTTP headers, exactly as they were configured with {@link #headers(Map)}, * {@link #putHeader(String, String)} and {@link #putHeader(String, List)}. 
*/ Map> headers(); diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AsyncExecuteRequest.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AsyncExecuteRequest.java index d3de2981bbe8..debeaf794d55 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AsyncExecuteRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/async/AsyncExecuteRequest.java @@ -15,8 +15,10 @@ package software.amazon.awssdk.http.async; +import java.util.Optional; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.metrics.MetricCollector; /** * Request object containing the parameters necessary to make an asynchronous HTTP request. @@ -28,12 +30,14 @@ public final class AsyncExecuteRequest { private final SdkHttpRequest request; private final SdkHttpContentPublisher requestContentPublisher; private final SdkAsyncHttpResponseHandler responseHandler; + private final MetricCollector metricCollector; private final boolean isFullDuplex; private AsyncExecuteRequest(BuilderImpl builder) { this.request = builder.request; this.requestContentPublisher = builder.requestContentPublisher; this.responseHandler = builder.responseHandler; + this.metricCollector = builder.metricCollector; this.isFullDuplex = builder.isFullDuplex; } @@ -58,6 +62,13 @@ public SdkAsyncHttpResponseHandler responseHandler() { return responseHandler; } + /** + * @return The {@link MetricCollector}. + */ + public Optional metricCollector() { + return Optional.ofNullable(metricCollector); + } + /** * @return True if the operation this request belongs to is full duplex. Otherwise false. */ @@ -94,6 +105,15 @@ public interface Builder { */ Builder responseHandler(SdkAsyncHttpResponseHandler responseHandler); + /** + * Set the {@link MetricCollector} to be used by the HTTP client to + * report metrics collected for this request. 
+ * + * @param metricCollector The metric collector. + * @return This builder for method chaining. + */ + Builder metricCollector(MetricCollector metricCollector); + /** * Option to indicate if the request is for a full duplex operation ie., request and response are sent/received at * the same time. @@ -112,6 +132,7 @@ private static class BuilderImpl implements Builder { private SdkHttpRequest request; private SdkHttpContentPublisher requestContentPublisher; private SdkAsyncHttpResponseHandler responseHandler; + private MetricCollector metricCollector; private boolean isFullDuplex; @Override @@ -132,6 +153,12 @@ public Builder responseHandler(SdkAsyncHttpResponseHandler responseHandler) { return this; } + @Override + public Builder metricCollector(MetricCollector metricCollector) { + this.metricCollector = metricCollector; + return this; + } + @Override public Builder fullDuplex(boolean fullDuplex) { isFullDuplex = fullDuplex; diff --git a/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java b/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java index 4f9e509c99f5..3848b6407b78 100644 --- a/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java +++ b/http-client-spi/src/test/java/software/amazon/awssdk/http/SdkHttpRequestResponseTest.java @@ -15,14 +15,17 @@ package software.amazon.awssdk.http; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.util.AbstractMap; import java.util.Arrays; import java.util.Collections; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; @@ -41,6 +44,107 @@ public 
void optionalValuesAreOptional() { assertThat(validResponse().statusText()).isNotPresent(); } + @Test + public void mapsAreNotCopiedWhenRoundTrippedToBuilderWithoutModification() { + SdkHttpFullRequest request = validRequestWithMaps(); + SdkHttpFullRequest request2 = request.toBuilder().build(); + assertThat(request2.headers()).isSameAs(request.headers()); + assertThat(request2.rawQueryParameters()).isSameAs(request.rawQueryParameters()); + + SdkHttpResponse response = validResponseWithMaps(); + SdkHttpResponse response2 = response.toBuilder().build(); + assertThat(response2.headers()).isSameAs(response.headers()); + } + + @Test + public void requestHeaderMapsAreCopiedWhenModified() { + assertRequestHeaderMapsAreCopied(b -> b.putHeader("foo", "bar")); + assertRequestHeaderMapsAreCopied(b -> b.putHeader("foo", singletonList("bar"))); + assertRequestHeaderMapsAreCopied(b -> b.appendHeader("foo", "bar")); + assertRequestHeaderMapsAreCopied(b -> b.headers(emptyMap())); + assertRequestHeaderMapsAreCopied(b -> b.clearHeaders()); + assertRequestHeaderMapsAreCopied(b -> b.removeHeader("Accept")); + } + + @Test + public void requestQueryStringMapsAreCopiedWhenModified() { + assertRequestQueryStringMapsAreCopied(b -> b.putRawQueryParameter("foo", "bar")); + assertRequestQueryStringMapsAreCopied(b -> b.putRawQueryParameter("foo", singletonList("bar"))); + assertRequestQueryStringMapsAreCopied(b -> b.appendRawQueryParameter("foo", "bar")); + assertRequestQueryStringMapsAreCopied(b -> b.rawQueryParameters(emptyMap())); + assertRequestQueryStringMapsAreCopied(b -> b.clearQueryParameters()); + assertRequestQueryStringMapsAreCopied(b -> b.removeQueryParameter("Accept")); + } + + @Test + public void responseHeaderMapsAreCopiedWhenModified() { + assertResponseHeaderMapsAreCopied(b -> b.putHeader("foo", "bar")); + assertResponseHeaderMapsAreCopied(b -> b.putHeader("foo", singletonList("bar"))); + assertResponseHeaderMapsAreCopied(b -> b.appendHeader("foo", "bar")); + 
assertResponseHeaderMapsAreCopied(b -> b.headers(emptyMap())); + assertResponseHeaderMapsAreCopied(b -> b.clearHeaders()); + assertResponseHeaderMapsAreCopied(b -> b.removeHeader("Accept")); + } + + private void assertRequestHeaderMapsAreCopied(Consumer mutation) { + SdkHttpFullRequest request = validRequestWithMaps(); + Map> originalQuery = new LinkedHashMap<>(request.headers()); + SdkHttpFullRequest.Builder builder = request.toBuilder(); + + assertThat(request.headers()).isEqualTo(builder.headers()); + + builder.applyMutation(mutation); + SdkHttpFullRequest request2 = builder.build(); + + assertThat(request.headers()).isEqualTo(originalQuery); + assertThat(request.headers()).isNotEqualTo(request2.headers()); + } + + private void assertRequestQueryStringMapsAreCopied(Consumer mutation) { + SdkHttpFullRequest request = validRequestWithMaps(); + Map> originalQuery = new LinkedHashMap<>(request.rawQueryParameters()); + SdkHttpFullRequest.Builder builder = request.toBuilder(); + + assertThat(request.rawQueryParameters()).isEqualTo(builder.rawQueryParameters()); + + builder.applyMutation(mutation); + SdkHttpFullRequest request2 = builder.build(); + + assertThat(request.rawQueryParameters()).isEqualTo(originalQuery); + assertThat(request.rawQueryParameters()).isNotEqualTo(request2.rawQueryParameters()); + } + + private void assertResponseHeaderMapsAreCopied(Consumer mutation) { + SdkHttpResponse response = validResponseWithMaps(); + Map> originalQuery = new LinkedHashMap<>(response.headers()); + SdkHttpResponse.Builder builder = response.toBuilder(); + + assertThat(response.headers()).isEqualTo(builder.headers()); + + builder.applyMutation(mutation); + SdkHttpResponse response2 = builder.build(); + + assertThat(response.headers()).isEqualTo(originalQuery); + assertThat(response.headers()).isNotEqualTo(response2.headers()); + } + + @Test + public void headersAreUnmodifiable() { + assertThatThrownBy(() -> 
validRequest().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validResponse().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validRequest().toBuilder().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validResponse().toBuilder().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validRequest().toBuilder().build().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validResponse().toBuilder().build().headers().clear()).isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void queryStringsAreUnmodifiable() { + assertThatThrownBy(() -> validRequest().rawQueryParameters().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validRequest().toBuilder().rawQueryParameters().clear()).isInstanceOf(UnsupportedOperationException.class); + assertThatThrownBy(() -> validRequest().toBuilder().build().rawQueryParameters().clear()).isInstanceOf(UnsupportedOperationException.class); + } + @Test public void uriConversionIsCorrect() { assertThat(normalizedUri(b -> b.protocol("http").host("localhost"))).isEqualTo("http://localhost"); @@ -332,7 +436,7 @@ private void headerOrQueryStringNormalizationIsCorrect(Supplier bu } private void assertMapIsInitiallyEmpty(Supplier builderFactory) { - assertThat(builderFactory.get().setMap(Collections.emptyMap()).getMap()).isEmpty(); + assertThat(builderFactory.get().setMap(emptyMap()).getMap()).isEmpty(); } private void setValue_SetsSingleValueCorrectly(Supplier builderFactory) { @@ -411,12 +515,32 @@ private void appendWithNoValues_AddsSingleElementToList(Supplier b }); } + private SdkHttpFullRequest validRequestWithMaps() { + return validRequestWithMapsBuilder().build(); + } + + private SdkHttpFullRequest.Builder validRequestWithMapsBuilder() { + return 
validRequestBuilder().putHeader("Accept", "*/*") + .putRawQueryParameter("Accept", "*/*"); + } + private SdkHttpFullRequest validRequest() { return validRequestBuilder().build(); } private SdkHttpFullRequest.Builder validRequestBuilder() { - return SdkHttpFullRequest.builder().protocol("http").host("localhost").method(SdkHttpMethod.GET); + return SdkHttpFullRequest.builder() + .protocol("http") + .host("localhost") + .method(SdkHttpMethod.GET); + } + + private SdkHttpResponse validResponseWithMaps() { + return validResponseWithMapsBuilder().build(); + } + + private SdkHttpResponse.Builder validResponseWithMapsBuilder() { + return validResponseBuilder().putHeader("Accept", "*/*"); } private SdkHttpFullResponse validResponse() { diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index a99ac6a73308..41e45616d09e 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT apache-client @@ -33,6 +33,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk utils diff --git a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/ApacheHttpClient.java b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/ApacheHttpClient.java index 0367a86c6d80..e0b8ac71bf8e 100644 --- a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/ApacheHttpClient.java +++ b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/ApacheHttpClient.java @@ -18,6 +18,11 @@ import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.mapping; import static java.util.stream.Collectors.toList; +import static software.amazon.awssdk.http.HttpMetric.AVAILABLE_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.HTTP_CLIENT_NAME; +import static 
software.amazon.awssdk.http.HttpMetric.LEASED_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.MAX_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.PENDING_CONCURRENCY_ACQUIRES; import static software.amazon.awssdk.utils.NumericUtils.saturatedCast; import java.io.IOException; @@ -57,6 +62,7 @@ import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.DefaultSchemePortResolver; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.pool.PoolStats; import org.apache.http.protocol.HttpRequestExecutor; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; @@ -81,6 +87,8 @@ import software.amazon.awssdk.http.apache.internal.impl.ApacheSdkHttpClient; import software.amazon.awssdk.http.apache.internal.impl.ConnectionManagerAwareHttpClient; import software.amazon.awssdk.http.apache.internal.utils.ApacheUtils; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; @@ -206,11 +214,15 @@ private boolean isProxyEnabled(ProxyConfiguration proxyConfiguration) { @Override public ExecutableHttpRequest prepareRequest(HttpExecuteRequest request) { + MetricCollector metricCollector = request.metricCollector().orElseGet(NoOpMetricCollector::create); + metricCollector.reportMetric(HTTP_CLIENT_NAME, clientName()); HttpRequestBase apacheRequest = toApacheRequest(request); return new ExecutableHttpRequest() { @Override public HttpExecuteResponse call() throws IOException { - return execute(apacheRequest); + HttpExecuteResponse executeResponse = execute(apacheRequest); + collectPoolMetric(metricCollector); + return executeResponse; } @Override @@ -283,6 +295,18 @@ private ApacheHttpRequestConfig createRequestConfig(DefaultBuilder 
builder, .build(); } + private void collectPoolMetric(MetricCollector metricCollector) { + HttpClientConnectionManager cm = httpClient.getHttpClientConnectionManager(); + if (cm instanceof PoolingHttpClientConnectionManager) { + PoolingHttpClientConnectionManager poolingCm = (PoolingHttpClientConnectionManager) cm; + PoolStats totalStats = poolingCm.getTotalStats(); + metricCollector.reportMetric(MAX_CONCURRENCY, totalStats.getMax()); + metricCollector.reportMetric(AVAILABLE_CONCURRENCY, totalStats.getAvailable()); + metricCollector.reportMetric(LEASED_CONCURRENCY, totalStats.getLeased()); + metricCollector.reportMetric(PENDING_CONCURRENCY_ACQUIRES, totalStats.getPending()); + } + } + @Override public String clientName() { return CLIENT_NAME; diff --git a/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/MetricReportingTest.java b/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/MetricReportingTest.java new file mode 100644 index 000000000000..fe41f7893595 --- /dev/null +++ b/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/MetricReportingTest.java @@ -0,0 +1,129 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.apache; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.http.HttpMetric.AVAILABLE_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.HTTP_CLIENT_NAME; +import static software.amazon.awssdk.http.HttpMetric.LEASED_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.MAX_CONCURRENCY; +import static software.amazon.awssdk.http.HttpMetric.PENDING_CONCURRENCY_ACQUIRES; +import java.io.IOException; +import java.time.Duration; +import org.apache.http.HttpVersion; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.conn.HttpClientConnectionManager; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.message.BasicHttpResponse; +import org.apache.http.pool.PoolStats; +import org.apache.http.protocol.HttpContext; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.apache.internal.ApacheHttpRequestConfig; +import software.amazon.awssdk.http.apache.internal.impl.ConnectionManagerAwareHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.AttributeMap; + +@RunWith(MockitoJUnitRunner.class) +public class MetricReportingTest { + + @Mock + public ConnectionManagerAwareHttpClient mockHttpClient; + + @Mock + public PoolingHttpClientConnectionManager cm; + + @Before + public void methodSetup() throws IOException { + when(mockHttpClient.execute(any(HttpUriRequest.class), 
any(HttpContext.class))) + .thenReturn(new BasicHttpResponse(HttpVersion.HTTP_1_1, 200, "OK")); + when(mockHttpClient.getHttpClientConnectionManager()).thenReturn(cm); + + PoolStats stats = new PoolStats(1, 2, 3, 4); + when(cm.getTotalStats()).thenReturn(stats); + } + + @Test + public void prepareRequest_callableCalled_metricsReported() throws IOException { + ApacheHttpClient client = newClient(); + MetricCollector collector = MetricCollector.create("test"); + HttpExecuteRequest executeRequest = newRequest(collector); + + client.prepareRequest(executeRequest).call(); + + MetricCollection collected = collector.collect(); + + assertThat(collected.metricValues(HTTP_CLIENT_NAME)).containsExactly("Apache"); + assertThat(collected.metricValues(LEASED_CONCURRENCY)).containsExactly(1); + assertThat(collected.metricValues(PENDING_CONCURRENCY_ACQUIRES)).containsExactly(2); + assertThat(collected.metricValues(AVAILABLE_CONCURRENCY)).containsExactly(3); + assertThat(collected.metricValues(MAX_CONCURRENCY)).containsExactly(4); + } + + @Test + public void prepareRequest_connectionManagerNotPooling_callableCalled_metricsReported() throws IOException { + ApacheHttpClient client = newClient(); + when(mockHttpClient.getHttpClientConnectionManager()).thenReturn(mock(HttpClientConnectionManager.class)); + MetricCollector collector = MetricCollector.create("test"); + HttpExecuteRequest executeRequest = newRequest(collector); + + client.prepareRequest(executeRequest).call(); + + MetricCollection collected = collector.collect(); + + assertThat(collected.metricValues(HTTP_CLIENT_NAME)).containsExactly("Apache"); + assertThat(collected.metricValues(LEASED_CONCURRENCY)).isEmpty(); + assertThat(collected.metricValues(PENDING_CONCURRENCY_ACQUIRES)).isEmpty(); + assertThat(collected.metricValues(AVAILABLE_CONCURRENCY)).isEmpty(); + assertThat(collected.metricValues(MAX_CONCURRENCY)).isEmpty(); + } + + private ApacheHttpClient newClient() { + ApacheHttpRequestConfig config = 
ApacheHttpRequestConfig.builder() + .connectionAcquireTimeout(Duration.ofDays(1)) + .connectionTimeout(Duration.ofDays(1)) + .socketTimeout(Duration.ofDays(1)) + .proxyConfiguration(ProxyConfiguration.builder().build()) + .build(); + + return new ApacheHttpClient(mockHttpClient, config, AttributeMap.empty()); + } + + private HttpExecuteRequest newRequest(MetricCollector collector) { + final SdkHttpFullRequest sdkRequest = SdkHttpFullRequest.builder() + .method(SdkHttpMethod.HEAD) + .host("amazonaws.com") + .protocol("https") + .build(); + + HttpExecuteRequest executeRequest = HttpExecuteRequest.builder() + .request(sdkRequest) + .metricCollector(collector) + .build(); + + return executeRequest; + } +} diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 55a360c5e780..186892a83913 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 @@ -33,7 +33,7 @@ software.amazon.awssdk.crt aws-crt - 0.5.1 + ${awscrt.version} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java index 69add45a203a..46a8ffb928d8 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtAsyncHttpClient.java @@ -18,7 +18,9 @@ import static software.amazon.awssdk.utils.CollectionUtils.isNullOrEmpty; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; +import java.io.IOException; import java.net.URI; +import java.time.Duration; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -26,12 +28,15 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import 
java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.CrtRuntimeException; import software.amazon.awssdk.crt.http.HttpClientConnectionManager; import software.amazon.awssdk.crt.http.HttpClientConnectionManagerOptions; import software.amazon.awssdk.crt.http.HttpHeader; +import software.amazon.awssdk.crt.http.HttpMonitoringOptions; +import software.amazon.awssdk.crt.http.HttpProxyOptions; import software.amazon.awssdk.crt.http.HttpRequest; import software.amazon.awssdk.crt.io.ClientBootstrap; import software.amazon.awssdk.crt.io.EventLoopGroup; @@ -40,6 +45,7 @@ import software.amazon.awssdk.crt.io.TlsCipherPreference; import software.amazon.awssdk.crt.io.TlsContext; import software.amazon.awssdk.crt.io.TlsContextOptions; +import software.amazon.awssdk.http.Header; import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpRequest; @@ -59,76 +65,96 @@ *

This can be created via {@link #builder()}

*/ @SdkPublicApi -public class AwsCrtAsyncHttpClient implements SdkAsyncHttpClient { +public final class AwsCrtAsyncHttpClient implements SdkAsyncHttpClient { private static final Logger log = Logger.loggerFor(AwsCrtAsyncHttpClient.class); - private static final String HOST_HEADER = "Host"; - private static final String CONTENT_LENGTH = "Content-Length"; - private static final String CONNECTION = "Connection"; - private static final String KEEP_ALIVE = "keep-alive"; + private static final String AWS_COMMON_RUNTIME = "AwsCommonRuntime"; + private static final String NULL_REQUEST_ERROR_MESSAGE = "SdkHttpRequest must not be null"; + private static final String NULL_URI_ERROR_MESSAGE = "URI must not be null"; private static final int DEFAULT_STREAM_WINDOW_SIZE = 16 * 1024 * 1024; // 16 MB private final Map connectionPools = new ConcurrentHashMap<>(); private final LinkedList ownedSubResources = new LinkedList<>(); - private final AtomicBoolean isClosed = new AtomicBoolean(false); private final ClientBootstrap bootstrap; private final SocketOptions socketOptions; - private final TlsContextOptions tlsContextOptions; private final TlsContext tlsContext; - private final int windowSize; + private final HttpProxyOptions proxyOptions; + private final HttpMonitoringOptions monitoringOptions; + private final long maxConnectionIdleInMilliseconds; + private final int initialWindowSize; private final int maxConnectionsPerEndpoint; - private final boolean manualWindowManagement; + private boolean isClosed = false; - public AwsCrtAsyncHttpClient(DefaultBuilder builder, AttributeMap config) { + private AwsCrtAsyncHttpClient(DefaultBuilder builder, AttributeMap config) { int maxConns = config.get(SdkHttpConfigurationOption.MAX_CONNECTIONS); Validate.isPositive(maxConns, "maxConns"); Validate.notNull(builder.cipherPreference, "cipherPreference"); - Validate.isPositive(builder.windowSize, "windowSize"); + Validate.isPositive(builder.initialWindowSize, "initialWindowSize"); 
Validate.notNull(builder.eventLoopGroup, "eventLoopGroup"); Validate.notNull(builder.hostResolver, "hostResolver"); - /** - * Must call own() in same order that CrtResources are created in, so that they will be closed in reverse order. - * - * Do NOT use Dependency Injection for Native CrtResources. It's possible to crash the JVM Process if Native - * Resources are closed in the wrong order (Eg closing the Bootstrap/Threadpool when there are still open - * connections). By creating and owning our own Native CrtResources we can guarantee that things are shutdown - * in the correct order. - */ + try (ClientBootstrap clientBootstrap = new ClientBootstrap(builder.eventLoopGroup, builder.hostResolver); + SocketOptions clientSocketOptions = new SocketOptions(); + TlsContextOptions clientTlsContextOptions = TlsContextOptions.createDefaultClient() // NOSONAR + .withCipherPreference(builder.cipherPreference) + .withVerifyPeer(!config.get(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES)); + TlsContext clientTlsContext = new TlsContext(clientTlsContextOptions)) { - this.bootstrap = own(new ClientBootstrap(builder.eventLoopGroup, builder.hostResolver)); - this.socketOptions = own(new SocketOptions()); + this.bootstrap = registerOwnedResource(clientBootstrap); + this.socketOptions = registerOwnedResource(clientSocketOptions); + this.tlsContext = registerOwnedResource(clientTlsContext); - /** - * Sonar raises a false-positive that the TlsContextOptions created here will not be closed. Using a "NOSONAR" - * comment so that Sonar will ignore that false-positive. 
- */ - this.tlsContextOptions = own(TlsContextOptions.createDefaultClient() // NOSONAR - .withCipherPreference(builder.cipherPreference) - .withVerifyPeer(builder.verifyPeer)); - - this.tlsContext = own(new TlsContext(this.tlsContextOptions)); - this.windowSize = builder.windowSize; - this.maxConnectionsPerEndpoint = maxConns; - this.manualWindowManagement = builder.manualWindowManagement; + this.initialWindowSize = builder.initialWindowSize; + this.maxConnectionsPerEndpoint = maxConns; + this.monitoringOptions = builder.monitoringOptions; + this.maxConnectionIdleInMilliseconds = config.get(SdkHttpConfigurationOption.CONNECTION_MAX_IDLE_TIMEOUT).toMillis(); + + this.proxyOptions = buildProxyOptions(builder.proxyConfiguration); + } + } + + private HttpProxyOptions buildProxyOptions(ProxyConfiguration proxyConfiguration) { + if (proxyConfiguration != null) { + HttpProxyOptions clientProxyOptions = new HttpProxyOptions(); + + clientProxyOptions.setHost(proxyConfiguration.host()); + clientProxyOptions.setPort(proxyConfiguration.port()); + if (proxyConfiguration.scheme() != null && proxyConfiguration.scheme().equalsIgnoreCase("https")) { + clientProxyOptions.setTlsContext(tlsContext); + } + + if (proxyConfiguration.username() != null && proxyConfiguration.password() != null) { + clientProxyOptions.setAuthorizationUsername(proxyConfiguration.username()); + clientProxyOptions.setAuthorizationPassword(proxyConfiguration.password()); + clientProxyOptions.setAuthorizationType(HttpProxyOptions.HttpProxyAuthorizationType.Basic); + } else { + clientProxyOptions.setAuthorizationType(HttpProxyOptions.HttpProxyAuthorizationType.None); + } + + return clientProxyOptions; + } else { + return null; + } } /** * Marks a Native CrtResource as owned by the current Java Object. - * This will guarantee that any owned CrtResources are closed in reverse order when this Java Object is closed. * * @param subresource The Resource to own. 
* @param The CrtResource Type * @return The CrtResource passed in */ - private T own(T subresource) { - ownedSubResources.push(subresource); + private T registerOwnedResource(T subresource) { + if (subresource != null) { + subresource.addRef(); + ownedSubResources.push(subresource); + } return subresource; } private static URI toUri(SdkHttpRequest sdkRequest) { - Validate.notNull(sdkRequest, "SdkHttpRequest must not be null"); + Validate.notNull(sdkRequest, NULL_REQUEST_ERROR_MESSAGE); return invokeSafely(() -> new URI(sdkRequest.protocol(), null, sdkRequest.host(), sdkRequest.port(), null, null, null)); } @@ -143,7 +169,6 @@ public String clientName() { } private HttpClientConnectionManager createConnectionPool(URI uri) { - Validate.notNull(uri, "URI must not be null"); log.debug(() -> "Creating ConnectionPool for: URI:" + uri + ", MaxConns: " + maxConnectionsPerEndpoint); HttpClientConnectionManagerOptions options = new HttpClientConnectionManagerOptions() @@ -151,51 +176,63 @@ private HttpClientConnectionManager createConnectionPool(URI uri) { .withSocketOptions(socketOptions) .withTlsContext(tlsContext) .withUri(uri) - .withWindowSize(windowSize) + .withWindowSize(initialWindowSize) .withMaxConnections(maxConnectionsPerEndpoint) - .withManualWindowManagement(manualWindowManagement); + .withManualWindowManagement(true) + .withProxyOptions(proxyOptions) + .withMonitoringOptions(monitoringOptions) + .withMaxConnectionIdleInMilliseconds(maxConnectionIdleInMilliseconds); return HttpClientConnectionManager.create(options); } + /* + * Callers of this function MUST account for the addRef() on the pool before returning. + * Every execution path consuming the return value must guarantee an associated close(). + * Currently this function is only used by execute(), which guarantees a matching close + * via the try-with-resources block. 
+ * + * This guarantees that a returned pool will not get closed (by closing the http client) during + * the time it takes to submit a request to the pool. Acquisition requests submitted to the pool will + * be properly failed if the http client is closed before the acquisition completes. + * + * This additional complexity means we only have to keep a lock for the scope of this function, as opposed to + * the scope of calling execute(). This function will almost always just be a hash lookup and the return of an + * existing pool. If we add all of execute() to the scope, we include, at minimum a JNI call to the native + * pool implementation. + */ private HttpClientConnectionManager getOrCreateConnectionPool(URI uri) { - Validate.notNull(uri, "URI must not be null"); - HttpClientConnectionManager connPool = connectionPools.get(uri); - - if (connPool == null) { - HttpClientConnectionManager newConnPool = createConnectionPool(uri); - HttpClientConnectionManager alreadyExistingConnPool = connectionPools.putIfAbsent(uri, newConnPool); - - if (alreadyExistingConnPool == null) { - connPool = newConnPool; - } else { - // Multiple threads trying to open connections to the same URI at once, close the newer one - newConnPool.close(); - connPool = alreadyExistingConnPool; + Validate.notNull(uri, NULL_URI_ERROR_MESSAGE); + synchronized (this) { + if (isClosed) { + throw new IllegalStateException("Client is closed. 
No more requests can be made with this client."); } - } - return connPool; + HttpClientConnectionManager connPool = connectionPools.computeIfAbsent(uri, this::createConnectionPool); + connPool.addRef(); + return connPool; + } } private List createHttpHeaderList(URI uri, AsyncExecuteRequest asyncRequest) { SdkHttpRequest sdkRequest = asyncRequest.request(); - List crtHeaderList = new ArrayList<>(sdkRequest.headers().size() + 2); + // worst case we may add 3 more headers here + List crtHeaderList = new ArrayList<>(sdkRequest.headers().size() + 3); // Set Host Header if needed - if (isNullOrEmpty(sdkRequest.headers().get(HOST_HEADER))) { - crtHeaderList.add(new HttpHeader(HOST_HEADER, uri.getHost())); + if (isNullOrEmpty(sdkRequest.headers().get(Header.HOST))) { + crtHeaderList.add(new HttpHeader(Header.HOST, uri.getHost())); } // Add Connection Keep Alive Header to reuse this Http Connection as long as possible - if (isNullOrEmpty(sdkRequest.headers().get(CONNECTION))) { - crtHeaderList.add(new HttpHeader(CONNECTION, KEEP_ALIVE)); + if (isNullOrEmpty(sdkRequest.headers().get(Header.CONNECTION))) { + crtHeaderList.add(new HttpHeader(Header.CONNECTION, Header.KEEP_ALIVE_VALUE)); } // Set Content-Length if needed Optional contentLength = asyncRequest.requestContentPublisher().contentLength(); - if (isNullOrEmpty(sdkRequest.headers().get(CONTENT_LENGTH)) && contentLength.isPresent()) { - crtHeaderList.add(new HttpHeader(CONTENT_LENGTH, Long.toString(contentLength.get()))); + if (isNullOrEmpty(sdkRequest.headers().get(Header.CONTENT_LENGTH)) && contentLength.isPresent()) { + crtHeaderList.add(new HttpHeader(Header.CONTENT_LENGTH, Long.toString(contentLength.get()))); } // Add the rest of the Headers @@ -215,11 +252,15 @@ private HttpHeader[] asArray(List crtHeaderList) { private HttpRequest toCrtRequest(URI uri, AsyncExecuteRequest asyncRequest, AwsCrtAsyncHttpStreamAdapter crtToSdkAdapter) { SdkHttpRequest sdkRequest = asyncRequest.request(); - Validate.notNull(uri, "URI 
must not be null"); - Validate.notNull(sdkRequest, "SdkHttpRequest must not be null"); + Validate.notNull(uri, NULL_URI_ERROR_MESSAGE); + Validate.notNull(sdkRequest, NULL_REQUEST_ERROR_MESSAGE); String method = sdkRequest.method().name(); String encodedPath = sdkRequest.encodedPath(); + if (encodedPath == null || encodedPath.length() == 0) { + encodedPath = "/"; + } + String encodedQueryString = SdkHttpUtils.encodeAndFlattenQueryParameters(sdkRequest.rawQueryParameters()) .map(value -> "?" + value) .orElse(""); @@ -231,48 +272,73 @@ private HttpRequest toCrtRequest(URI uri, AsyncExecuteRequest asyncRequest, AwsC @Override public CompletableFuture execute(AsyncExecuteRequest asyncRequest) { - if (isClosed.get()) { - throw new IllegalStateException("Client is closed. No more requests can be made with this client."); - } + Validate.notNull(asyncRequest, "AsyncExecuteRequest must not be null"); - Validate.notNull(asyncRequest.request(), "SdkHttpRequest must not be null"); + Validate.notNull(asyncRequest.request(), NULL_REQUEST_ERROR_MESSAGE); Validate.notNull(asyncRequest.requestContentPublisher(), "RequestContentPublisher must not be null"); Validate.notNull(asyncRequest.responseHandler(), "ResponseHandler must not be null"); URI uri = toUri(asyncRequest.request()); - HttpClientConnectionManager crtConnPool = getOrCreateConnectionPool(uri); - CompletableFuture requestFuture = new CompletableFuture<>(); - - // When a Connection is ready from the Connection Pool, schedule the Request on the connection - crtConnPool.acquireConnection() - .whenComplete((crtConn, throwable) -> { - // If we didn't get a connection for some reason, fail the request - if (throwable != null) { - requestFuture.completeExceptionally(throwable); - return; - } - - AwsCrtAsyncHttpStreamAdapter crtToSdkAdapter = - new AwsCrtAsyncHttpStreamAdapter(crtConn, requestFuture, asyncRequest, windowSize); - HttpRequest crtRequest = toCrtRequest(uri, asyncRequest, crtToSdkAdapter); - - // Submit the Request 
on this Connection - invokeSafely(() -> crtConn.makeRequest(crtRequest, crtToSdkAdapter).activate()); - }); - - return requestFuture; + + /* + * See the note on getOrCreateConnectionPool() + * + * In particular, this returns a ref-counted object and calling getOrCreateConnectionPool + * increments the ref count by one. We add a try-with-resources to release our ref + * once we have successfully submitted a request. In this way, we avoid a race condition + * when close/shutdown is called from another thread while this function is executing (ie. + * we have a pool and no one can destroy it underneath us until we've finished submitting the + * request) + */ + try (HttpClientConnectionManager crtConnPool = getOrCreateConnectionPool(uri)) { + CompletableFuture requestFuture = new CompletableFuture<>(); + + // When a Connection is ready from the Connection Pool, schedule the Request on the connection + crtConnPool.acquireConnection() + .whenComplete((crtConn, throwable) -> { + // If we didn't get a connection for some reason, fail the request + if (throwable != null) { + try { + asyncRequest.responseHandler().onError(throwable); + } catch (Exception e) { + log.error(() -> String.format("Exception while handling error: %s", e.toString())); + } + requestFuture.completeExceptionally(new IOException( + "Crt exception while acquiring connection", throwable)); + return; + } + + AwsCrtAsyncHttpStreamAdapter crtToSdkAdapter = + new AwsCrtAsyncHttpStreamAdapter(crtConn, requestFuture, asyncRequest, initialWindowSize); + HttpRequest crtRequest = toCrtRequest(uri, asyncRequest, crtToSdkAdapter); + + // Submit the Request on this Connection + invokeSafely(() -> { + try { + crtConn.makeRequest(crtRequest, crtToSdkAdapter).activate(); + } catch (IllegalStateException | CrtRuntimeException e) { + throw new IOException("Exception throw while submitting request to CRT http connection", e); + } + }); + }); + + return requestFuture; + } } @Override public void close() { - 
isClosed.set(true); - for (HttpClientConnectionManager connPool : connectionPools.values()) { - IoUtils.closeQuietly(connPool, log.logger()); - } + synchronized (this) { + + if (isClosed) { + return; + } + + connectionPools.values().forEach(pool -> IoUtils.closeQuietly(pool, log.logger())); + ownedSubResources.forEach(r -> IoUtils.closeQuietly(r, log.logger())); + ownedSubResources.clear(); - while (ownedSubResources.size() > 0) { - CrtResource r = ownedSubResources.pop(); - IoUtils.closeQuietly(r, log.logger()); + isClosed = true; } } @@ -282,36 +348,30 @@ public void close() { public interface Builder extends SdkAsyncHttpClient.Builder { /** - * The AWS CRT TlsCipherPreference to use for this Client - * @param tlsCipherPreference The AWS Common Runtime TlsCipherPreference - * @return The builder of the method chaining. - */ - Builder tlsCipherPreference(TlsCipherPreference tlsCipherPreference); - - /** - * Whether or not to Verify the Peer's TLS Certificate Chain. - * @param verifyPeer true if the Certificate Chain should be validated, false if validation should be skipped. + * The maximum number of connections allowed per distinct endpoint + * @param maxConnections maximum connections per endpoint * @return The builder of the method chaining. */ - Builder verifyPeer(boolean verifyPeer); + Builder maxConnections(int maxConnections); /** - * If set to true, then the TCP read back pressure mechanism will be enabled, and the user - * is responsible for calling incrementWindow on the stream object. - * @param manualWindowManagement true if the TCP back pressure mechanism should be enabled. + * The AWS CRT TlsCipherPreference to use for this Client + * @param tlsCipherPreference The AWS Common Runtime TlsCipherPreference * @return The builder of the method chaining. */ - Builder manualWindowManagement(boolean manualWindowManagement); + Builder tlsCipherPreference(TlsCipherPreference tlsCipherPreference); /** - * The AWS CRT WindowSize to use for this HttpClient. 
This represents the number of unread bytes that can be - * buffered in the ResponseBodyPublisher before we stop reading from the underlying TCP socket and wait for - * the Subscriber to read more data. + * The AWS CRT WindowSize to use for this HttpClient. + * + * For an http/1.1 connection, this represents the number of unread bytes that can be buffered in the + * ResponseBodyPublisher before we stop reading from the underlying TCP socket and wait for the Subscriber + * to read more data. * - * @param windowSize The AWS Common Runtime WindowSize + * @param initialWindowSize The AWS Common Runtime WindowSize * @return The builder of the method chaining. */ - Builder windowSize(int windowSize); + Builder initialWindowSize(int initialWindowSize); /** * The AWS CRT EventLoopGroup to use for this Client. @@ -326,6 +386,33 @@ public interface Builder extends SdkAsyncHttpClient.Builder proxyConfigurationBuilderConsumer); } /** @@ -335,11 +422,11 @@ public interface Builder extends SdkAsyncHttpClient.Builder proxyConfigurationBuilderConsumer) { + ProxyConfiguration.Builder builder = ProxyConfiguration.builder(); + proxyConfigurationBuilderConsumer.accept(builder); + return proxyConfiguration(builder.build()); + } } } diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java new file mode 100644 index 000000000000..644c736fa6c5 --- /dev/null +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/ProxyConfiguration.java @@ -0,0 +1,236 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + + +/** + * Proxy configuration for {@link AwsCrtAsyncHttpClient}. This class is used to configure an HTTP proxy to be used by + * the {@link AwsCrtAsyncHttpClient}. + * + * @see AwsCrtAsyncHttpClient.Builder#proxyConfiguration(ProxyConfiguration) + */ +@SdkPublicApi +public final class ProxyConfiguration implements ToCopyableBuilder { + private final String scheme; + private final String host; + private final int port; + + private final String username; + private final String password; + + private ProxyConfiguration(BuilderImpl builder) { + this.scheme = builder.scheme; + this.host = builder.host; + this.port = builder.port; + this.username = builder.username; + this.password = builder.password; + } + + /** + * @return The proxy scheme. + */ + public String scheme() { + return scheme; + } + + /** + * @return The proxy host. + */ + public String host() { + return host; + } + + /** + * @return The proxy port. 
+ */ + public int port() { + return port; + } + + /** + * @return Basic authentication username + */ + public String username() { + return username; + } + + /** + * @return Basic authentication password + */ + public String password() { + return password; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + ProxyConfiguration that = (ProxyConfiguration) o; + + if (port != that.port) { + return false; + } + + if (!Objects.equals(this.scheme, that.scheme)) { + return false; + } + + if (!Objects.equals(this.host, that.host)) { + return false; + } + + if (!Objects.equals(this.username, that.username)) { + return false; + } + + return Objects.equals(this.password, that.password); + } + + @Override + public int hashCode() { + int result = scheme != null ? scheme.hashCode() : 0; + result = 31 * result + (host != null ? host.hashCode() : 0); + result = 31 * result + port; + result = 31 * result + (username != null ? username.hashCode() : 0); + result = 31 * result + (password != null ? password.hashCode() : 0); + + return result; + } + + @Override + public Builder toBuilder() { + return new BuilderImpl(this); + } + + public static Builder builder() { + return new BuilderImpl(); + } + + /** + * Builder for {@link ProxyConfiguration}. + */ + public interface Builder extends CopyableBuilder { + + /** + * Set the hostname of the proxy. + * @param host The proxy host. + * @return This object for method chaining. + */ + Builder host(String host); + + /** + * Set the port that the proxy expects connections on. + * @param port The proxy port. + * @return This object for method chaining. + */ + Builder port(int port); + + /** + * The HTTP scheme to use for connecting to the proxy. Valid values are {@code http} and {@code https}. + *

+ * The client defaults to {@code http} if none is given. + * + * @param scheme The proxy scheme. + * @return This object for method chaining. + */ + Builder scheme(String scheme); + + /** + * The username to use for basic proxy authentication + *

+ * If not set, the client will not use basic authentication + * + * @param username The basic authentication username. + * @return This object for method chaining. + */ + Builder username(String username); + + /** + * The password to use for basic proxy authentication + *

+ * If not set, the client will not use basic authentication + * + * @param password The basic authentication password. + * @return This object for method chaining. + */ + Builder password(String password); + } + + private static final class BuilderImpl implements Builder { + private String scheme; + private String host; + private int port; + private String username; + private String password; + + private BuilderImpl() { + } + + private BuilderImpl(ProxyConfiguration proxyConfiguration) { + this.scheme = proxyConfiguration.scheme; + this.host = proxyConfiguration.host; + this.port = proxyConfiguration.port; + this.username = proxyConfiguration.username; + this.password = proxyConfiguration.password; + } + + @Override + public Builder scheme(String scheme) { + this.scheme = scheme; + return this; + } + + @Override + public Builder host(String host) { + this.host = host; + return this; + } + + @Override + public Builder port(int port) { + this.port = port; + return this; + } + + @Override + public Builder username(String username) { + this.username = username; + return this; + } + + @Override + public Builder password(String password) { + this.password = password; + return this; + } + + @Override + public ProxyConfiguration build() { + return new ProxyConfiguration(this); + } + } +} diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java index 730a85413ad6..5f1f79578b94 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtAsyncHttpStreamAdapter.java @@ -22,9 +22,11 @@ import software.amazon.awssdk.crt.http.HttpClientConnection; import software.amazon.awssdk.crt.http.HttpException; import 
software.amazon.awssdk.crt.http.HttpHeader; +import software.amazon.awssdk.crt.http.HttpHeaderBlock; import software.amazon.awssdk.crt.http.HttpRequestBodyStream; import software.amazon.awssdk.crt.http.HttpStream; import software.amazon.awssdk.crt.http.HttpStreamResponseHandler; +import software.amazon.awssdk.http.HttpStatusFamily; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.utils.Logger; @@ -47,15 +49,10 @@ public class AwsCrtAsyncHttpStreamAdapter implements HttpStreamResponseHandler, public AwsCrtAsyncHttpStreamAdapter(HttpClientConnection connection, CompletableFuture responseComplete, AsyncExecuteRequest sdkRequest, int windowSize) { - Validate.notNull(connection, "HttpConnection is null"); - Validate.notNull(responseComplete, "reqComplete Future is null"); - Validate.notNull(sdkRequest, "AsyncExecuteRequest Future is null"); - Validate.isPositive(windowSize, "windowSize is <= 0"); - - this.connection = connection; - this.responseComplete = responseComplete; - this.sdkRequest = sdkRequest; - this.windowSize = windowSize; + this.connection = Validate.notNull(connection, "HttpConnection is null"); + this.responseComplete = Validate.notNull(responseComplete, "reqComplete Future is null"); + this.sdkRequest = Validate.notNull(sdkRequest, "AsyncExecuteRequest Future is null"); + this.windowSize = Validate.isPositive(windowSize, "windowSize is <= 0"); this.requestBodySubscriber = new AwsCrtRequestBodySubscriber(windowSize); sdkRequest.requestContentPublisher().subscribe(requestBodySubscriber); @@ -71,8 +68,6 @@ private void initRespBodyPublisherIfNeeded(HttpStream stream) { public void onResponseHeaders(HttpStream stream, int responseStatusCode, int blockType, HttpHeader[] nextHeaders) { initRespBodyPublisherIfNeeded(stream); - respBuilder.statusCode(responseStatusCode); - for (HttpHeader h : nextHeaders) { respBuilder.appendHeader(h.getName(), h.getValue()); } @@ 
-80,25 +75,28 @@ public void onResponseHeaders(HttpStream stream, int responseStatusCode, int blo @Override public void onResponseHeadersDone(HttpStream stream, int headerType) { - initRespBodyPublisherIfNeeded(stream); + if (headerType == HttpHeaderBlock.MAIN.getValue()) { + initRespBodyPublisherIfNeeded(stream); - respBuilder.statusCode(stream.getResponseStatusCode()); - sdkRequest.responseHandler().onHeaders(respBuilder.build()); - sdkRequest.responseHandler().onStream(respBodyPublisher); + respBuilder.statusCode(stream.getResponseStatusCode()); + sdkRequest.responseHandler().onHeaders(respBuilder.build()); + sdkRequest.responseHandler().onStream(respBodyPublisher); + } } @Override public int onResponseBody(HttpStream stream, byte[] bodyBytesIn) { initRespBodyPublisherIfNeeded(stream); - if (respBodyPublisher == null) { - log.error(() -> "Publisher is null, onResponseHeadersDone() was never called"); - throw new IllegalStateException("Publisher is null, onResponseHeadersDone() was never called"); - } - respBodyPublisher.queueBuffer(bodyBytesIn); respBodyPublisher.publishToSubscribers(); + /* + * Intentionally zero. We manually manage the crt stream's window within the body publisher by updating with + * the exact amount we were able to push to the subcriber. + * + * See the call to stream.incrementWindow() in AwsCrtResponseBodyPublisher. 
+ */ return 0; } @@ -106,6 +104,10 @@ public int onResponseBody(HttpStream stream, byte[] bodyBytesIn) { public void onResponseComplete(HttpStream stream, int errorCode) { initRespBodyPublisherIfNeeded(stream); + if (HttpStatusFamily.of(respBuilder.statusCode()) == HttpStatusFamily.SERVER_ERROR) { + connection.shutdown(); + } + if (errorCode == CRT.AWS_CRT_SUCCESS) { log.debug(() -> "Response Completed Successfully"); respBodyPublisher.setQueueComplete(); @@ -115,7 +117,12 @@ public void onResponseComplete(HttpStream stream, int errorCode) { log.error(() -> "Response Encountered an Error.", error); // Invoke Error Callback on SdkAsyncHttpResponseHandler - sdkRequest.responseHandler().onError(error); + try { + sdkRequest.responseHandler().onError(error); + } catch (Exception e) { + log.error(() -> String.format("SdkAsyncHttpResponseHandler %s threw an exception in onError: %s", + sdkRequest.responseHandler(), e)); + } // Invoke Error Callback on any Subscriber's of the Response Body respBodyPublisher.setError(error); diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java index b9790dcb1f1d..15bb7835719f 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtRequestBodySubscriber.java @@ -109,7 +109,7 @@ public synchronized boolean transferRequestBody(ByteBuffer out) { throw new RuntimeException(error.get()); } - while (out.remaining() > 0 && queuedBuffers.size() > 0) { + while (out.remaining() > 0 && !queuedBuffers.isEmpty()) { ByteBuffer nextBuffer = queuedBuffers.peek(); int amtTransferred = transferData(nextBuffer, out); queuedByteCount.addAndGet(-amtTransferred); @@ -119,7 +119,7 @@ public synchronized 
boolean transferRequestBody(ByteBuffer out) { } } - boolean endOfStream = isComplete.get() && (queuedBuffers.size() == 0); + boolean endOfStream = isComplete.get() && queuedBuffers.isEmpty(); if (!endOfStream) { requestDataIfNecessary(); diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java index ebc7888634b6..4c1b88be86d7 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/internal/AwsCrtResponseBodyPublisher.java @@ -27,6 +27,7 @@ import java.util.function.LongUnaryOperator; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.crt.http.HttpClientConnection; import software.amazon.awssdk.crt.http.HttpStream; @@ -64,14 +65,10 @@ public class AwsCrtResponseBodyPublisher implements Publisher { */ public AwsCrtResponseBodyPublisher(HttpClientConnection connection, HttpStream stream, CompletableFuture responseComplete, int windowSize) { - Validate.notNull(connection, "HttpConnection must not be null"); - Validate.notNull(stream, "Stream must not be null"); - Validate.notNull(responseComplete, "Stream must not be null"); - Validate.isPositive(windowSize, "windowSize must be > 0"); - this.connection = connection; - this.stream = stream; - this.responseComplete = responseComplete; - this.windowSize = windowSize; + this.connection = Validate.notNull(connection, "HttpConnection must not be null"); + this.stream = Validate.notNull(stream, "Stream must not be null"); + this.responseComplete = Validate.notNull(responseComplete, "ResponseComplete future must not be null"); + 
this.windowSize = Validate.isPositive(windowSize, "windowSize must be > 0"); } /** @@ -86,11 +83,23 @@ public void subscribe(Subscriber subscriber) { if (!wasFirstSubscriber) { log.error(() -> "Only one subscriber allowed"); + + // onSubscribe must be called first before onError gets called, so give it a do-nothing Subscription + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + // This is a dummy implementation to allow the onError call + } + + @Override + public void cancel() { + // This is a dummy implementation to allow the onError call + } + }); subscriber.onError(new IllegalStateException("Only one subscriber allowed")); - return; + } else { + subscriber.onSubscribe(new AwsCrtResponseBodySubscription(this)); } - - subscriber.onSubscribe(new AwsCrtResponseBodySubscription(this)); } /** @@ -185,7 +194,7 @@ protected void completeSubscriptionExactlyOnce() { } // Subscriber may have cancelled their subscription, in which case this may be null. - Optional subscriber = Optional.ofNullable(subscriberRef.getAndSet(null)); + Optional> subscriber = Optional.ofNullable(subscriberRef.getAndSet(null)); Throwable throwable = error.get(); @@ -195,11 +204,19 @@ protected void completeSubscriptionExactlyOnce() { // Complete the Futures if (throwable != null) { log.error(() -> "Error before ResponseBodyPublisher could complete: " + throwable.getMessage()); - subscriber.ifPresent(s -> s.onError(throwable)); + try { + subscriber.ifPresent(s -> s.onError(throwable)); + } catch (Exception e) { + log.warn(() -> "Failed to exceptionally complete subscriber future with: " + throwable.getMessage()); + } responseComplete.completeExceptionally(throwable); } else { log.debug(() -> "ResponseBodyPublisher Completed Successfully"); - subscriber.ifPresent(s -> s.onComplete()); + try { + subscriber.ifPresent(Subscriber::onComplete); + } catch (Exception e) { + log.warn(() -> "Failed to successfully complete subscriber future"); + } 
responseComplete.complete(null); } } @@ -213,49 +230,54 @@ protected void completeSubscriptionExactlyOnce() { * calling queuedBuffers.poll(), but then have the 2nd thread call subscriber.onNext(buffer) first, resulting in the * subscriber seeing out-of-order data. To avoid this race condition, this method must be synchronized. */ - protected synchronized void publishToSubscribers() { - if (error.get() != null) { - completeSubscriptionExactlyOnce(); - return; - } - - if (isSubscriptionComplete.get() || isCancelled.get()) { - log.warn(() -> "Subscription already completed or cancelled, can't publish updates to Subscribers."); - return; - } - - if (mutualRecursionDepth.get() > 0) { - /** - * If our depth is > 0, then we already made a call to publishToSubscribers() further up the stack that - * will continue publishing to subscribers, and this call should return without completing work to avoid - * infinite recursive loop between: "subscription.request() -> subscriber.onNext() -> subscription.request()" - */ - return; - } - - int totalAmountTransferred = 0; - - while (outstandingRequests.get() > 0 && queuedBuffers.size() > 0) { - byte[] buffer = queuedBuffers.poll(); - outstandingRequests.getAndUpdate(DECREMENT_IF_GREATER_THAN_ZERO); - int amount = buffer.length; - publishWithoutMutualRecursion(subscriberRef.get(), ByteBuffer.wrap(buffer)); - totalAmountTransferred += amount; - } - - if (totalAmountTransferred > 0) { - queuedBytes.addAndGet(-totalAmountTransferred); - - // We may have released the Native HttpConnection and HttpStream if they completed before the Subscriber - // has finished reading the data. 
- if (!areNativeResourcesReleased.get()) { - // Open HttpStream's IO window so HttpStream can keep track of IO back-pressure - stream.incrementWindow(totalAmountTransferred); + protected void publishToSubscribers() { + boolean shouldComplete = true; + synchronized (this) { + if (error.get() == null) { + if (isSubscriptionComplete.get() || isCancelled.get()) { + log.debug(() -> "Subscription already completed or cancelled, can't publish updates to Subscribers."); + return; + } + + if (mutualRecursionDepth.get() > 0) { + /** + * If our depth is > 0, then we already made a call to publishToSubscribers() further up the stack that + * will continue publishing to subscribers, and this call should return without completing work to avoid + * infinite recursive loop between: "subscription.request() -> subscriber.onNext() -> subscription.request()" + */ + return; + } + + int totalAmountTransferred = 0; + + while (outstandingRequests.get() > 0 && !queuedBuffers.isEmpty()) { + byte[] buffer = queuedBuffers.poll(); + outstandingRequests.getAndUpdate(DECREMENT_IF_GREATER_THAN_ZERO); + int amount = buffer.length; + publishWithoutMutualRecursion(subscriberRef.get(), ByteBuffer.wrap(buffer)); + totalAmountTransferred += amount; + } + + if (totalAmountTransferred > 0) { + queuedBytes.addAndGet(-totalAmountTransferred); + + // We may have released the Native HttpConnection and HttpStream if they completed before the Subscriber + // has finished reading the data. + if (!areNativeResourcesReleased.get()) { + // Open HttpStream's IO window so HttpStream can keep track of IO back-pressure + // This is why it is correct to return 0 from AwsCrtAsyncHttpStreamAdapter::onResponseBody + stream.incrementWindow(totalAmountTransferred); + } + } + + shouldComplete = queueComplete.get() && queuedBuffers.isEmpty(); + } else { + shouldComplete = true; } } // Check if Complete, consider no subscriber as a completion. 
- if (queueComplete.get() && queuedBuffers.size() == 0) { + if (shouldComplete) { completeSubscriptionExactlyOnce(); } } @@ -282,4 +304,30 @@ private synchronized void publishWithoutMutualRecursion(Subscriber executeFuture = client.execute(AsyncExecuteRequest.builder() .request(request) @@ -144,7 +141,7 @@ public void onStream(Publisher stream) { } }; - SdkHttpRequest request = createRequest(URI.create("http://localhost:" + mockServer.port())); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(URI.create("http://localhost:" + mockServer.port())); CompletableFuture future = client.execute(AsyncExecuteRequest.builder() .request(request) @@ -192,7 +189,7 @@ public void onError(Throwable t) { }; URI uri = URI.create("http://localhost:" + mockServer.port()); - SdkHttpRequest request = createRequest(uri, path, null, SdkHttpMethod.GET, emptyMap()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, path, null, SdkHttpMethod.GET, emptyMap()); CompletableFuture future = client.execute(AsyncExecuteRequest.builder() .request(request) @@ -214,44 +211,13 @@ private void makePutRequest(String path, byte[] reqBody, int expectedStatus) thr final AtomicReference response = new AtomicReference<>(null); final AtomicReference error = new AtomicReference<>(null); - Subscriber subscriber = new Subscriber() { - @Override - public void onSubscribe(Subscription subscription) { - subscription.request(Long.MAX_VALUE); - } - - @Override - public void onNext(ByteBuffer byteBuffer) { - } + Subscriber subscriber = CrtHttpClientTestUtils.createDummySubscriber(); - @Override - public void onError(Throwable throwable) { - } - - @Override - public void onComplete() { - } - }; - - SdkAsyncHttpResponseHandler handler = new SdkAsyncHttpResponseHandler() { - @Override - public void onHeaders(SdkHttpResponse headers) { - response.compareAndSet(null, headers); - } - @Override - public void onStream(Publisher stream) { - stream.subscribe(subscriber); - 
streamReceived.complete(true); - } - - @Override - public void onError(Throwable t) { - error.compareAndSet(null, t); - } - }; + SdkAsyncHttpResponseHandler handler = CrtHttpClientTestUtils.createTestResponseHandler(response, + streamReceived, error, subscriber); URI uri = URI.create("http://localhost:" + mockServer.port()); - SdkHttpRequest request = createRequest(uri, path, reqBody, SdkHttpMethod.PUT, emptyMap()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, path, reqBody, SdkHttpMethod.PUT, emptyMap()); CompletableFuture future = client.execute(AsyncExecuteRequest.builder() .request(request) @@ -278,29 +244,7 @@ public void testPutRequest() throws Exception { makePutRequest(pathExpect404, randomBody, 404); } - private SdkHttpFullRequest createRequest(URI endpoint) { - return createRequest(endpoint, "/", null, SdkHttpMethod.GET, emptyMap()); - } - private SdkHttpFullRequest createRequest(URI endpoint, - String resourcePath, - byte[] body, - SdkHttpMethod method, - Map params) { - - String contentLength = (body == null) ? 
null : String.valueOf(body.length); - return SdkHttpFullRequest.builder() - .uri(endpoint) - .method(method) - .encodedPath(resourcePath) - .applyMutation(b -> params.forEach(b::putRawQueryParameter)) - .applyMutation(b -> { - b.putHeader("Host", endpoint.getHost()); - if (contentLength != null) { - b.putHeader("Content-Length", contentLength); - } - }).build(); - } private static class TestResponseHandler implements SdkAsyncHttpResponseHandler { @Override diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java new file mode 100644 index 000000000000..3d1619cf26b6 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/CrtHttpClientTestUtils.java @@ -0,0 +1,87 @@ +package software.amazon.awssdk.http.crt; + +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; + +public class CrtHttpClientTestUtils { + + static Subscriber createDummySubscriber() { + return new Subscriber() { + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + } + + @Override + public void onError(Throwable throwable) { + } + + @Override + public void onComplete() { + } + }; + } + + static SdkAsyncHttpResponseHandler createTestResponseHandler(AtomicReference response, + 
CompletableFuture streamReceived, + AtomicReference error, + Subscriber subscriber) { + return new SdkAsyncHttpResponseHandler() { + @Override + public void onHeaders(SdkHttpResponse headers) { + response.compareAndSet(null, headers); + } + @Override + public void onStream(Publisher stream) { + stream.subscribe(subscriber); + streamReceived.complete(true); + } + + @Override + public void onError(Throwable t) { + error.compareAndSet(null, t); + } + }; + } + + static SdkHttpFullRequest createRequest(URI endpoint) { + return createRequest(endpoint, "/", null, SdkHttpMethod.GET, emptyMap()); + } + + static SdkHttpFullRequest createRequest(URI endpoint, + String resourcePath, + byte[] body, + SdkHttpMethod method, + Map params) { + + String contentLength = (body == null) ? null : String.valueOf(body.length); + return SdkHttpFullRequest.builder() + .uri(endpoint) + .method(method) + .encodedPath(resourcePath) + .applyMutation(b -> params.forEach(b::putRawQueryParameter)) + .applyMutation(b -> { + b.putHeader("Host", endpoint.getHost()); + if (contentLength != null) { + b.putHeader("Content-Length", contentLength); + } + }).build(); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java new file mode 100644 index 000000000000..8c4ce411d951 --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/H1ServerBehaviorTest.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.H1ServerBehaviorTestBase; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.utils.AttributeMap; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +/** + * Testing the scenario where h1 server sends 5xx errors. + */ +public class H1ServerBehaviorTest extends H1ServerBehaviorTestBase { + private SdkAsyncHttpClient crtClient; + + @Override + protected SdkAsyncHttpClient getTestClient() { return crtClient; } + + @Before + public void setup() throws Exception { + super.setup(); + + int numThreads = Runtime.getRuntime().availableProcessors(); + try (EventLoopGroup eventLoopGroup = new EventLoopGroup(numThreads); + HostResolver hostResolver = new HostResolver(eventLoopGroup)) { + + crtClient = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(eventLoopGroup) + .hostResolver(hostResolver) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + } + + + @After + public void teardown() throws InterruptedException { + super.teardown(); + + if (crtClient != null) { + crtClient.close(); + } + crtClient = null; + } + + @Test + public void connectionReceiveServerErrorStatusShouldNotReuseConnection() { + assertThat(crtClient).isNotNull(); + super.connectionReceiveServerErrorStatusShouldNotReuseConnection(); + } + + @Test + public void 
connectionReceiveOkStatusShouldReuseConnection() { + assertThat(crtClient).isNotNull(); + super.connectionReceiveOkStatusShouldReuseConnection(); + } + + @Test + public void connectionReceiveCloseHeaderShouldNotReuseConnection() throws InterruptedException { + assertThat(crtClient).isNotNull(); + super.connectionReceiveCloseHeaderShouldNotReuseConnection(); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java new file mode 100644 index 000000000000..3f01c7a7774d --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyConfigurationTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.crt; + +import static org.assertj.core.api.Assertions.assertThat; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Random; +import java.util.stream.Stream; +import org.junit.Test; + +/** + * Tests for {@link ProxyConfiguration}. 
+ */ +public class ProxyConfigurationTest { + private static final Random RNG = new Random(); + + @Test + public void build_setsAllProperties() { + verifyAllPropertiesSet(allPropertiesSetConfig()); + } + + @Test + public void toBuilder_roundTrip_producesExactCopy() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration copy = original.toBuilder().build(); + + assertThat(copy).isEqualTo(original); + } + + @Test + public void toBuilderModified_doesNotModifySource() { + ProxyConfiguration original = allPropertiesSetConfig(); + + ProxyConfiguration modified = setAllPropertiesToRandomValues(original.toBuilder()).build(); + + assertThat(original).isNotEqualTo(modified); + } + + private ProxyConfiguration allPropertiesSetConfig() { + return setAllPropertiesToRandomValues(ProxyConfiguration.builder()).build(); + } + + private ProxyConfiguration.Builder setAllPropertiesToRandomValues(ProxyConfiguration.Builder builder) { + Stream.of(builder.getClass().getDeclaredMethods()) + .filter(m -> m.getParameterCount() == 1 && m.getReturnType().equals(ProxyConfiguration.Builder.class)) + .forEach(m -> { + try { + m.setAccessible(true); + setRandomValue(builder, m); + } catch (Exception e) { + throw new RuntimeException("Could not create random proxy config", e); + } + }); + return builder; + } + + private void setRandomValue(Object o, Method setter) throws InvocationTargetException, IllegalAccessException { + Class paramClass = setter.getParameterTypes()[0]; + + if (String.class.equals(paramClass)) { + setter.invoke(o, randomString()); + } else if (int.class.equals(paramClass)) { + setter.invoke(o, RNG.nextInt()); + } else { + throw new RuntimeException("Don't know how create random value for type " + paramClass); + } + } + + private void verifyAllPropertiesSet(ProxyConfiguration cfg) { + boolean hasNullProperty = Stream.of(cfg.getClass().getDeclaredMethods()) + .filter(m -> !m.getReturnType().equals(Void.class) && m.getParameterCount() == 0) + .anyMatch(m 
-> { + m.setAccessible(true); + try { + return m.invoke(cfg) == null; + } catch (Exception e) { + return true; + } + }); + + if (hasNullProperty) { + throw new RuntimeException("Given configuration has unset property"); + } + } + + private String randomString() { + String alpha = "abcdefghijklmnopqrstuwxyz"; + + StringBuilder sb = new StringBuilder(16); + for (int i = 0; i < 16; ++i) { + sb.append(alpha.charAt(RNG.nextInt(16))); + } + + return sb.toString(); + } +} diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java new file mode 100644 index 000000000000..a0af3ee981ce --- /dev/null +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/ProxyWireMockTest.java @@ -0,0 +1,133 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + + +package software.amazon.awssdk.http.crt; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; +import static java.util.Collections.emptyMap; +import static org.assertj.core.api.Assertions.assertThat; +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import java.net.URI; +import java.nio.ByteBuffer; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.crt.CrtResource; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; + +/** + * Tests for HTTP proxy functionality in the CRT client. 
+ */ +public class ProxyWireMockTest { + private SdkAsyncHttpClient client; + + private EventLoopGroup eventLoopGroup; + private HostResolver hostResolver; + + private ProxyConfiguration proxyCfg; + + private WireMockServer mockProxy = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort() + .enableBrowserProxying(true)); // make the mock proxy actually forward (to the mock server for our test) + + private WireMockServer mockServer = new WireMockServer(new WireMockConfiguration() + .dynamicPort() + .dynamicHttpsPort()); + + + @Before + public void setup() { + mockProxy.start(); + mockServer.start(); + + mockServer.stubFor(get(urlMatching(".*")).willReturn(aResponse().withStatus(200).withBody("hello"))); + + proxyCfg = ProxyConfiguration.builder() + .host("localhost") + .port(mockProxy.port()) + .build(); + + + int numThreads = Runtime.getRuntime().availableProcessors(); + eventLoopGroup = new EventLoopGroup(numThreads); + hostResolver = new HostResolver(eventLoopGroup); + + client = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(eventLoopGroup) + .hostResolver(hostResolver) + .proxyConfiguration(proxyCfg) + .build(); + } + + @After + public void teardown() { + mockServer.stop(); + mockProxy.stop(); + client.close(); + eventLoopGroup.close(); + hostResolver.close(); + CrtResource.waitForNoResources(); + } + + /* + * Note the contrast between this test and the netty connect test. The CRT proxy implementation does not + * do a CONNECT call for requests using http, so by configuring the proxy mock to forward and the server mock + * to return success, we can actually create an end-to-end test. + * + * We have an outstanding request to change this behavior to match https (use a CONNECT call). Once that + * change happens, this test will break and need to be updated to be more like the netty one. 
+ */ + @Test + public void proxyConfigured_httpGet() throws Throwable { + + CompletableFuture streamReceived = new CompletableFuture<>(); + final AtomicReference response = new AtomicReference<>(null); + final AtomicReference error = new AtomicReference<>(null); + + Subscriber subscriber = CrtHttpClientTestUtils.createDummySubscriber(); + + SdkAsyncHttpResponseHandler handler = CrtHttpClientTestUtils.createTestResponseHandler(response, streamReceived, error, subscriber); + + URI uri = URI.create("http://localhost:" + mockServer.port()); + SdkHttpRequest request = CrtHttpClientTestUtils.createRequest(uri, "/server/test", null, SdkHttpMethod.GET, emptyMap()); + + CompletableFuture future = client.execute(AsyncExecuteRequest.builder() + .request(request) + .responseHandler(handler) + .requestContentPublisher(new EmptyPublisher()) + .build()); + future.get(60, TimeUnit.SECONDS); + assertThat(error.get()).isNull(); + assertThat(streamReceived.get(60, TimeUnit.SECONDS)).isTrue(); + assertThat(response.get().statusCode()).isEqualTo(200); + } + +} diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index 17ee1903f2e5..8e59fd13b54f 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 @@ -46,6 +46,11 @@ utils ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java index b6f2bbafb098..4df1195e9ff6 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java @@ 
-15,6 +15,7 @@ package software.amazon.awssdk.http.nio.netty; +import static software.amazon.awssdk.http.HttpMetric.HTTP_CLIENT_NAME; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_FUTURE_TIMEOUT_SECONDS; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_QUIET_PERIOD_SECONDS; import static software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration.EVENTLOOP_SHUTDOWN_TIMEOUT_SECONDS; @@ -23,7 +24,6 @@ import io.netty.channel.ChannelOption; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslProvider; import java.net.URI; @@ -51,6 +51,7 @@ import software.amazon.awssdk.http.nio.netty.internal.NonManagedEventLoopGroup; import software.amazon.awssdk.http.nio.netty.internal.RequestContext; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelOptions; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; import software.amazon.awssdk.http.nio.netty.internal.SharedSdkEventLoopGroup; import software.amazon.awssdk.utils.AttributeMap; @@ -79,7 +80,7 @@ public final class NettyNioAsyncHttpClient implements SdkAsyncHttpClient { .build(); private final SdkEventLoopGroup sdkEventLoopGroup; - private final SdkChannelPoolMap pools; + private final SdkChannelPoolMap pools; private final NettyConfiguration configuration; private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefaultsMap) { @@ -107,7 +108,7 @@ private NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefa @SdkTestInternalApi NettyNioAsyncHttpClient(SdkEventLoopGroup sdkEventLoopGroup, - SdkChannelPoolMap pools, + SdkChannelPoolMap pools, NettyConfiguration configuration) { this.sdkEventLoopGroup = sdkEventLoopGroup; this.pools = pools; @@ -117,6 +118,7 @@ private 
NettyNioAsyncHttpClient(DefaultBuilder builder, AttributeMap serviceDefa @Override public CompletableFuture execute(AsyncExecuteRequest request) { RequestContext ctx = createRequestContext(request); + ctx.metricCollector().reportMetric(HTTP_CLIENT_NAME, clientName()); // TODO: Can't this be done in core? return new NettyRequestExecutor(ctx).execute(); } @@ -125,7 +127,7 @@ public static Builder builder() { } private RequestContext createRequestContext(AsyncExecuteRequest request) { - ChannelPool pool = pools.get(poolKey(request.request())); + SdkChannelPool pool = pools.get(poolKey(request.request())); return new RequestContext(pool, sdkEventLoopGroup.eventLoopGroup(), request, configuration); } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java index 01a42222c588..341f453f2c10 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/AwaitCloseChannelPoolMap.java @@ -150,7 +150,7 @@ protected SimpleChannelPoolAwareChannelPool newPool(URI key) { baseChannelPool = tcpChannelPool; } - ChannelPool wrappedPool = wrapBaseChannelPool(bootstrap, baseChannelPool); + SdkChannelPool wrappedPool = wrapBaseChannelPool(bootstrap, baseChannelPool); channelPoolRef.set(wrappedPool); return new SimpleChannelPoolAwareChannelPool(wrappedPool, tcpChannelPool); @@ -231,33 +231,32 @@ private URI proxyAddress(URI remoteHost) { } } - private ChannelPool wrapBaseChannelPool(Bootstrap bootstrap, ChannelPool channelPool) { + private SdkChannelPool wrapBaseChannelPool(Bootstrap bootstrap, ChannelPool channelPool) { // Wrap the channel pool such that the ChannelAttributeKey.CLOSE_ON_RELEASE flag is honored. 
channelPool = new HonorCloseOnReleaseChannelPool(channelPool); // Wrap the channel pool such that HTTP 2 channels won't be released to the underlying pool while they're still in use. - channelPool = new HttpOrHttp2ChannelPool(channelPool, - bootstrap.config().group(), - configuration.maxConnections(), - configuration); - + SdkChannelPool sdkChannelPool = new HttpOrHttp2ChannelPool(channelPool, + bootstrap.config().group(), + configuration.maxConnections(), + configuration); // Wrap the channel pool such that we remove request-specific handlers with each request. - channelPool = new HandlerRemovingChannelPool(channelPool); + sdkChannelPool = new HandlerRemovingChannelPool(sdkChannelPool); // Wrap the channel pool such that an individual channel can only be released to the underlying pool once. - channelPool = new ReleaseOnceChannelPool(channelPool); + sdkChannelPool = new ReleaseOnceChannelPool(sdkChannelPool); // Wrap the channel pool to guarantee all channels checked out are healthy, and all unhealthy channels checked in are // closed. - channelPool = new HealthCheckedChannelPool(bootstrap.config().group(), configuration, channelPool); + sdkChannelPool = new HealthCheckedChannelPool(bootstrap.config().group(), configuration, sdkChannelPool); // Wrap the channel pool such that if the Promise given to acquire(Promise) is done when the channel is acquired // from the underlying pool, the channel is closed and released. 
- channelPool = new CancellableAcquireChannelPool(bootstrap.config().group().next(), channelPool); + sdkChannelPool = new CancellableAcquireChannelPool(bootstrap.config().group().next(), sdkChannelPool); - return channelPool; + return sdkChannelPool; } private SslContext sslContext(URI targetAddress) { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java index 3a972f74fddf..c8fe1cd8b739 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPool.java @@ -20,7 +20,9 @@ import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; /** * Simple decorator {@link ChannelPool} that attempts to complete the promise @@ -29,11 +31,11 @@ * is closed then released back to the delegate. 
*/ @SdkInternalApi -public final class CancellableAcquireChannelPool implements ChannelPool { +public final class CancellableAcquireChannelPool implements SdkChannelPool { private final EventExecutor executor; - private final ChannelPool delegatePool; + private final SdkChannelPool delegatePool; - public CancellableAcquireChannelPool(EventExecutor executor, ChannelPool delegatePool) { + public CancellableAcquireChannelPool(EventExecutor executor, SdkChannelPool delegatePool) { this.executor = executor; this.delegatePool = delegatePool; } @@ -73,4 +75,9 @@ public Future release(Channel channel, Promise promise) { public void close() { delegatePool.close(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegatePool.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java index f2feca135ffa..27c180ac5870 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ChannelAttributeKey.java @@ -18,6 +18,7 @@ import io.netty.channel.Channel; import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2FrameStream; import io.netty.util.AttributeKey; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; @@ -26,6 +27,7 @@ import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool; import software.amazon.awssdk.http.nio.netty.internal.http2.PingTracker; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; /** * Keys for attributes 
attached via {@link io.netty.channel.Channel#attr(AttributeKey)}. @@ -36,70 +38,78 @@ public final class ChannelAttributeKey { /** * Future that when a protocol (http/1.1 or h2) has been selected. */ - public static final AttributeKey> PROTOCOL_FUTURE = AttributeKey.newInstance( + public static final AttributeKey> PROTOCOL_FUTURE = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.protocolFuture"); /** * Reference to {@link Http2MultiplexedChannelPool} which stores information about leased streams for a multiplexed * connection. */ - public static final AttributeKey HTTP2_MULTIPLEXED_CHANNEL_POOL = AttributeKey.newInstance( - "aws.http.nio.netty.async.http2MultiplexedChannelPool"); + public static final AttributeKey HTTP2_MULTIPLEXED_CHANNEL_POOL = + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2MultiplexedChannelPool"); public static final AttributeKey PING_TRACKER = - AttributeKey.newInstance("aws.http.nio.netty.async.h2.pingTracker"); + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.h2.pingTracker"); public static final AttributeKey HTTP2_CONNECTION = - AttributeKey.newInstance("aws.http.nio.netty.async.http2Connection"); + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2Connection"); public static final AttributeKey HTTP2_INITIAL_WINDOW_SIZE = - AttributeKey.newInstance("aws.http.nio.netty.async.http2InitialWindowSize"); + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.http2InitialWindowSize"); /** * Value of the MAX_CONCURRENT_STREAMS from the server's SETTING frame. */ - public static final AttributeKey MAX_CONCURRENT_STREAMS = AttributeKey.newInstance( + public static final AttributeKey MAX_CONCURRENT_STREAMS = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.maxConcurrentStreams"); + /** + * The {@link Http2FrameStream} associated with this stream channel. This is added to stream channels when they are created, + * before they are fully initialized. 
+ */ + public static final AttributeKey HTTP2_FRAME_STREAM = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.http2FrameStream"); + /** * {@link AttributeKey} to keep track of whether we should close the connection after this request * has completed. */ - static final AttributeKey KEEP_ALIVE = AttributeKey.newInstance("aws.http.nio.netty.async.keepAlive"); + static final AttributeKey KEEP_ALIVE = NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.keepAlive"); /** * Attribute key for {@link RequestContext}. */ - static final AttributeKey REQUEST_CONTEXT_KEY = AttributeKey.newInstance( + static final AttributeKey REQUEST_CONTEXT_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.requestContext"); - static final AttributeKey> SUBSCRIBER_KEY = AttributeKey.newInstance( + static final AttributeKey> SUBSCRIBER_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.subscriber"); - static final AttributeKey RESPONSE_COMPLETE_KEY = AttributeKey.newInstance( + static final AttributeKey RESPONSE_COMPLETE_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.responseComplete"); /** * {@link AttributeKey} to keep track of whether we have received the {@link LastHttpContent}. 
*/ - static final AttributeKey LAST_HTTP_CONTENT_RECEIVED_KEY = AttributeKey.newInstance( + static final AttributeKey LAST_HTTP_CONTENT_RECEIVED_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.lastHttpContentReceived"); - static final AttributeKey> EXECUTE_FUTURE_KEY = AttributeKey.newInstance( + static final AttributeKey> EXECUTE_FUTURE_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.executeFuture"); - static final AttributeKey EXECUTION_ID_KEY = AttributeKey.newInstance( + static final AttributeKey EXECUTION_ID_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.executionId"); /** * Whether the channel is still in use */ - static final AttributeKey IN_USE = AttributeKey.newInstance("aws.http.nio.netty.async.inUse"); + static final AttributeKey IN_USE = NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.inUse"); /** * Whether the channel should be closed once it is released. */ - static final AttributeKey CLOSE_ON_RELEASE = AttributeKey.newInstance("aws.http.nio.netty.async.closeOnRelease"); + static final AttributeKey CLOSE_ON_RELEASE = NettyUtils.getOrCreateAttributeKey( + "aws.http.nio.netty.async.closeOnRelease"); private ChannelAttributeKey() { } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java index 1ce19749da49..680b594a8cd7 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPool.java @@ -20,25 +20,26 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.timeout.ReadTimeoutHandler; import 
io.netty.handler.timeout.WriteTimeoutHandler; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.nio.netty.internal.http2.FlushOnReadHandler; import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; +import software.amazon.awssdk.metrics.MetricCollector; /** * Removes any per request {@link ChannelHandler} from the pipeline prior to releasing * it to the pool. */ @SdkInternalApi -public class HandlerRemovingChannelPool implements ChannelPool { +public class HandlerRemovingChannelPool implements SdkChannelPool { - private final ChannelPool delegate; + private final SdkChannelPool delegate; - public HandlerRemovingChannelPool(ChannelPool delegate) { + public HandlerRemovingChannelPool(SdkChannelPool delegate) { this.delegate = delegate; } @@ -86,4 +87,9 @@ private void removePerRequestHandlers(Channel channel) { WriteTimeoutHandler.class); } } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java index 038fcc5f83da..dd8ca3cf53fa 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPool.java @@ -23,9 +23,11 @@ import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.ScheduledFuture; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.TimeoutException; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; /** * An implementation of {@link ChannelPool} that validates the health of its connections. @@ -40,14 +42,14 @@ * {@link NettyConfiguration#connectionAcquireTimeoutMillis()} timeout is reached. */ @SdkInternalApi -public class HealthCheckedChannelPool implements ChannelPool { +public class HealthCheckedChannelPool implements SdkChannelPool { private final EventLoopGroup eventLoopGroup; private final int acquireTimeoutMillis; - private final ChannelPool delegate; + private final SdkChannelPool delegate; public HealthCheckedChannelPool(EventLoopGroup eventLoopGroup, NettyConfiguration configuration, - ChannelPool delegate) { + SdkChannelPool delegate) { this.eventLoopGroup = eventLoopGroup; this.acquireTimeoutMillis = configuration.connectionAcquireTimeoutMillis(); this.delegate = delegate; @@ -169,4 +171,9 @@ private boolean isHealthy(Channel channel) { return channel.isActive(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java index b2a416c0cafe..f687f7052c3d 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/Http1TunnelConnectionPool.java @@ -29,6 +29,7 @@ import java.net.URI; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; import 
software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.StringUtils; @@ -37,7 +38,7 @@ */ @SdkInternalApi public class Http1TunnelConnectionPool implements ChannelPool { - static final AttributeKey TUNNEL_ESTABLISHED_KEY = AttributeKey.newInstance( + static final AttributeKey TUNNEL_ESTABLISHED_KEY = NettyUtils.getOrCreateAttributeKey( "aws.http.nio.netty.async.Http1TunnelConnectionPool.tunnelEstablished"); private static final Logger log = Logger.loggerFor(Http1TunnelConnectionPool.class); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java new file mode 100644 index 000000000000..4b0f4571f1bc --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPool.java @@ -0,0 +1,235 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; + +import io.netty.channel.Channel; +import io.netty.channel.pool.ChannelPool; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.Logger; + +/** + * A channel pool implementation that tracks the number of "idle" channels in an underlying channel pool. + * + *

Specifically, this pool counts the number of channels acquired and then released from/to the underlying channel pool. It + * will monitor for the underlying channels to be closed, and will remove them from the "idle" count. + */ +@SdkInternalApi +public class IdleConnectionCountingChannelPool implements SdkChannelPool { + private static final Logger log = Logger.loggerFor(IdleConnectionCountingChannelPool.class); + + /** + * The idle channel state for a specific channel. This should only be accessed from the {@link #executor}. + */ + private static final AttributeKey CHANNEL_STATE = + NettyUtils.getOrCreateAttributeKey("IdleConnectionCountingChannelPool.CHANNEL_STATE"); + + /** + * The executor in which all updates to {@link #idleConnections} is performed. + */ + private final EventExecutor executor; + + /** + * The delegate pool to which all acquire and release calls are delegated. + */ + private final ChannelPool delegatePool; + + /** + * The number of idle connections in the underlying channel pool. This value is only valid if accessed from the + * {@link #executor}. 
+ */ + private int idleConnections = 0; + + public IdleConnectionCountingChannelPool(EventExecutor executor, ChannelPool delegatePool) { + this.executor = executor; + this.delegatePool = delegatePool; + } + + @Override + public Future acquire() { + return acquire(executor.newPromise()); + } + + @Override + public Future acquire(Promise promise) { + Future acquirePromise = delegatePool.acquire(executor.newPromise()); + acquirePromise.addListener(f -> { + Throwable failure = acquirePromise.cause(); + if (failure != null) { + promise.setFailure(failure); + } else { + Channel channel = acquirePromise.getNow(); + channelAcquired(channel); + promise.setSuccess(channel); + } + }); + + return promise; + } + + @Override + public Future release(Channel channel) { + channelReleased(channel); + return delegatePool.release(channel); + } + + @Override + public Future release(Channel channel, Promise promise) { + channelReleased(channel); + return delegatePool.release(channel, promise); + } + + @Override + public void close() { + delegatePool.close(); + } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(executor, () -> { + metrics.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, idleConnections); + result.complete(null); + }); + return result; + } + + /** + * Add a listener to the provided channel that will update the idle channel count when the channel is closed. + */ + private void addUpdateIdleCountOnCloseListener(Channel channel) { + channel.closeFuture().addListener(f -> channelClosed(channel)); + } + + /** + * Invoked when a channel is acquired, marking it non-idle until it's closed or released. 
+ */ + private void channelAcquired(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + + if (channelIdleState == null) { + addUpdateIdleCountOnCloseListener(channel); + setChannelIdleState(channel, ChannelIdleState.NOT_IDLE); + } else { + switch (channelIdleState) { + case IDLE: + decrementIdleConnections(); + setChannelIdleState(channel, ChannelIdleState.NOT_IDLE); + break; + case CLOSED: + break; + case NOT_IDLE: + default: + log.warn(() -> "Failed to update idle connection count metric on acquire, because the channel (" + + channel + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + /** + * Invoked when a channel is released, marking it idle until it's acquired. + */ + private void channelReleased(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + + if (channelIdleState == null) { + log.warn(() -> "Failed to update idle connection count metric on release, because the channel (" + channel + + ") was in an unexpected state: null"); + } else { + switch (channelIdleState) { + case NOT_IDLE: + incrementIdleConnections(); + setChannelIdleState(channel, ChannelIdleState.IDLE); + break; + case CLOSED: + break; + case IDLE: + default: + log.warn(() -> "Failed to update idle connection count metric on release, because the channel (" + + channel + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + /** + * Invoked when a channel is closed, ensure it is marked as non-idle. 
+ */ + private void channelClosed(Channel channel) { + doInEventLoop(executor, () -> { + ChannelIdleState channelIdleState = getChannelIdleState(channel); + setChannelIdleState(channel, ChannelIdleState.CLOSED); + + if (channelIdleState != null) { + switch (channelIdleState) { + case IDLE: + decrementIdleConnections(); + break; + case NOT_IDLE: + break; + default: + log.warn(() -> "Failed to update idle connection count metric on close, because the channel (" + channel + + ") was in an unexpected state: " + channelIdleState); + } + } + }); + } + + private ChannelIdleState getChannelIdleState(Channel channel) { + return channel.attr(CHANNEL_STATE).get(); + } + + private void setChannelIdleState(Channel channel, ChannelIdleState newState) { + channel.attr(CHANNEL_STATE).set(newState); + } + + /** + * Decrement the idle connection count. This must be invoked from the {@link #executor}. + */ + private void decrementIdleConnections() { + --idleConnections; + log.trace(() -> "Idle connection count decremented, now " + idleConnections); + } + + /** + * Increment the idle connection count. This must be invoked from the {@link #executor}. + */ + private void incrementIdleConnections() { + ++idleConnections; + log.trace(() -> "Idle connection count incremented, now " + idleConnections); + } + + /** + * The idle state of a channel. 
+ */ + private enum ChannelIdleState { + IDLE, + NOT_IDLE, + CLOSED + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java index 64d1c152f3a4..72214c9a1802 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutor.java @@ -67,6 +67,7 @@ import software.amazon.awssdk.http.nio.netty.internal.nrs.HttpStreamsClientHandler; import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpRequest; import software.amazon.awssdk.http.nio.netty.internal.utils.ChannelUtils; +import software.amazon.awssdk.metrics.MetricCollector; @SdkInternalApi public final class NettyRequestExecutor { @@ -87,8 +88,8 @@ public NettyRequestExecutor(RequestContext context) { @SuppressWarnings("unchecked") public CompletableFuture execute() { Promise channelFuture = context.eventLoopGroup().next().newPromise(); + executeFuture = createExecutionFuture(channelFuture); context.channelPool().acquire(channelFuture); - executeFuture = createExecuteFuture(channelFuture); channelFuture.addListener((GenericFutureListener) this::makeRequestListener); return executeFuture; } @@ -100,10 +101,13 @@ public CompletableFuture execute() { * * @return The created execution future. 
*/ - private CompletableFuture createExecuteFuture(Promise channelPromise) { - CompletableFuture future = new CompletableFuture<>(); + private CompletableFuture createExecutionFuture(Promise channelPromise) { + CompletableFuture metricsFuture = initiateMetricsCollection(); + CompletableFuture future = new CompletableFuture<>(); future.whenComplete((r, t) -> { + verifyMetricsWereCollected(metricsFuture); + if (t == null) { return; } @@ -131,6 +135,31 @@ private CompletableFuture createExecuteFuture(Promise channelProm return future; } + private CompletableFuture initiateMetricsCollection() { + MetricCollector metricCollector = context.metricCollector(); + if (!NettyRequestMetrics.metricsAreEnabled(metricCollector)) { + return null; + } + return context.channelPool().collectChannelPoolMetrics(metricCollector); + } + + private void verifyMetricsWereCollected(CompletableFuture metricsFuture) { + if (metricsFuture == null) { + return; + } + + if (!metricsFuture.isDone()) { + log.debug("HTTP request metric collection did not finish in time, so results may be incomplete."); + metricsFuture.cancel(false); + return; + } + + metricsFuture.exceptionally(t -> { + log.debug("HTTP request metric collection failed, so results may be incomplete.", t); + return null; + }); + } + private void makeRequestListener(Future channelFuture) { if (channelFuture.isSuccess()) { channel = channelFuture.getNow(); @@ -209,6 +238,8 @@ private void writeRequest(HttpRequest request) { // Done writing so remove the idle write timeout handler ChannelUtils.removeIfExists(channel.pipeline(), WriteTimeoutHandler.class); if (wireCall.isSuccess()) { + NettyRequestMetrics.publishHttp2StreamMetrics(context.metricCollector(), channel); + if (context.executeRequest().fullDuplex()) { return; } @@ -216,7 +247,6 @@ private void writeRequest(HttpRequest request) { channel.pipeline().addFirst(new ReadTimeoutHandler(context.configuration().readTimeoutMillis(), TimeUnit.MILLISECONDS)); channel.read(); - } else { // 
TODO: Are there cases where we can keep the channel open? closeAndRelease(channel); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java new file mode 100644 index 000000000000..092b1c79d686 --- /dev/null +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestMetrics.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.channel.Channel; +import io.netty.handler.codec.http2.Http2Connection; +import io.netty.handler.codec.http2.Http2Stream; +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.Http2Metric; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; + +/** + * Utilities for collecting and publishing request-level metrics. + */ +@SdkInternalApi +public class NettyRequestMetrics { + private NettyRequestMetrics() { + } + + /** + * Determine whether metrics are enabled, based on the provided metric collector. 
+ */ + public static boolean metricsAreEnabled(MetricCollector metricCollector) { + return metricCollector != null && !(metricCollector instanceof NoOpMetricCollector); + } + + /** + * Publish stream metrics for the provided stream channel to the provided collector. This should only be invoked after + * the stream has been initialized. If the stream is not initialized when this is invoked, an exception will be thrown. + */ + public static void publishHttp2StreamMetrics(MetricCollector metricCollector, Channel channel) { + if (!metricsAreEnabled(metricCollector)) { + return; + } + + getHttp2Connection(channel).ifPresent(http2Connection -> { + writeHttp2RequestMetrics(metricCollector, channel, http2Connection); + }); + } + + private static Optional getHttp2Connection(Channel channel) { + Channel parentChannel = channel.parent(); + if (parentChannel == null) { + return Optional.empty(); + } + + return Optional.ofNullable(parentChannel.attr(ChannelAttributeKey.HTTP2_CONNECTION).get()); + } + + private static void writeHttp2RequestMetrics(MetricCollector metricCollector, + Channel channel, + Http2Connection http2Connection) { + int streamId = channel.attr(ChannelAttributeKey.HTTP2_FRAME_STREAM).get().id(); + + Http2Stream stream = http2Connection.stream(streamId); + metricCollector.reportMetric(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES, + http2Connection.local().flowController().windowSize(stream)); + metricCollector.reportMetric(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES, + http2Connection.remote().flowController().windowSize(stream)); + } +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java index e0fb07427704..c4e80a1922e9 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java +++ 
b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ReleaseOnceChannelPool.java @@ -23,9 +23,12 @@ import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.SucceededFuture; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; /** * Wrapper around a {@link ChannelPool} to protect it from having the same channel released twice. This can @@ -33,13 +36,14 @@ * mechanism to track leased connections. */ @SdkInternalApi -public class ReleaseOnceChannelPool implements ChannelPool { +public class ReleaseOnceChannelPool implements SdkChannelPool { - private static final AttributeKey IS_RELEASED = AttributeKey.newInstance("isReleased"); + private static final AttributeKey IS_RELEASED = NettyUtils.getOrCreateAttributeKey( + "software.amazon.awssdk.http.nio.netty.internal.http2.ReleaseOnceChannelPool.isReleased"); - private final ChannelPool delegate; + private final SdkChannelPool delegate; - public ReleaseOnceChannelPool(ChannelPool delegate) { + public ReleaseOnceChannelPool(SdkChannelPool delegate) { this.delegate = delegate; } @@ -90,4 +94,9 @@ private boolean shouldRelease(Channel channel) { public void close() { delegate.close(); } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java index 
356703778939..9dc2e09f1d4e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/RequestContext.java @@ -16,20 +16,23 @@ package software.amazon.awssdk.http.nio.netty.internal; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.NoOpMetricCollector; @SdkInternalApi public final class RequestContext { - private final ChannelPool channelPool; + private final SdkChannelPool channelPool; private final EventLoopGroup eventLoopGroup; private final AsyncExecuteRequest executeRequest; private final NettyConfiguration configuration; - public RequestContext(ChannelPool channelPool, + private final MetricCollector metricCollector; + + public RequestContext(SdkChannelPool channelPool, EventLoopGroup eventLoopGroup, AsyncExecuteRequest executeRequest, NettyConfiguration configuration) { @@ -37,9 +40,10 @@ public RequestContext(ChannelPool channelPool, this.eventLoopGroup = eventLoopGroup; this.executeRequest = executeRequest; this.configuration = configuration; + this.metricCollector = executeRequest.metricCollector().orElseGet(NoOpMetricCollector::create); } - public ChannelPool channelPool() { + public SdkChannelPool channelPool() { return channelPool; } @@ -64,4 +68,8 @@ public SdkAsyncHttpResponseHandler handler() { public NettyConfiguration configuration() { return configuration; } + + public MetricCollector metricCollector() { + return metricCollector; + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java 
b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java index 3c66a69253a2..8019d0816a93 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ResponseHandler.java @@ -52,6 +52,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.HttpStatusFamily; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.SdkCancellationException; import software.amazon.awssdk.http.SdkHttpFullResponse; @@ -85,7 +86,7 @@ protected void channelRead0(ChannelHandlerContext channelContext, HttpObject msg .statusCode(response.status().code()) .statusText(response.status().reasonPhrase()) .build(); - channelContext.channel().attr(KEEP_ALIVE).set(HttpUtil.isKeepAlive(response)); + channelContext.channel().attr(KEEP_ALIVE).set(shouldKeepAlive(response)); requestContext.handler().onHeaders(sdkResponse); } @@ -128,6 +129,13 @@ private static void finalizeResponse(RequestContext requestContext, ChannelHandl } } + private boolean shouldKeepAlive(HttpResponse response) { + if (HttpStatusFamily.of(response.status().code()) == HttpStatusFamily.SERVER_ERROR) { + return false; + } + return HttpUtil.isKeepAlive(response); + } + @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { RequestContext requestContext = ctx.channel().attr(REQUEST_CONTEXT_KEY).get(); diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java new file mode 100644 index 000000000000..3238797eecde --- /dev/null +++ 
b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SdkChannelPool.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import io.netty.channel.pool.ChannelPool; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; + +/** + * A {@link ChannelPool} implementation that allows a caller to asynchronously retrieve channel-pool related metrics via + * {@link #collectChannelPoolMetrics(MetricCollector)}. + */ +@SdkInternalApi +public interface SdkChannelPool extends ChannelPool { + /** + * Collect channel pool metrics into the provided {@link MetricCollector} collection, completing the returned future when + * all metric publishing is complete. + * + * @param metrics The collection to which all metrics should be added. + * @return A future that is completed when all metric publishing is complete. 
+ */ + CompletableFuture collectChannelPoolMetrics(MetricCollector metrics); +} diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java index dd9778521321..30e6247d419e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/SimpleChannelPoolAwareChannelPool.java @@ -16,17 +16,18 @@ package software.amazon.awssdk.http.nio.netty.internal; import io.netty.channel.Channel; -import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollector; @SdkInternalApi -final class SimpleChannelPoolAwareChannelPool implements ChannelPool { - private final ChannelPool delegate; +final class SimpleChannelPoolAwareChannelPool implements SdkChannelPool { + private final SdkChannelPool delegate; private final BetterSimpleChannelPool simpleChannelPool; - SimpleChannelPoolAwareChannelPool(ChannelPool delegate, BetterSimpleChannelPool simpleChannelPool) { + SimpleChannelPoolAwareChannelPool(SdkChannelPool delegate, BetterSimpleChannelPool simpleChannelPool) { this.delegate = delegate; this.simpleChannelPool = simpleChannelPool; } @@ -60,4 +61,8 @@ public BetterSimpleChannelPool underlyingSimpleChannelPool() { return simpleChannelPool; } + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + return delegate.collectChannelPoolMetrics(metrics); + } } diff --git 
a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterTimedOutException.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java similarity index 65% rename from test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterTimedOutException.java rename to http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java index b3c0a6804b61..e241eb98c572 100644 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterTimedOutException.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ConnectionTerminatingException.java @@ -13,18 +13,17 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.core.waiters; +package software.amazon.awssdk.http.nio.netty.internal.http2; -public class WaiterTimedOutException extends RuntimeException { +import software.amazon.awssdk.annotations.SdkInternalApi; - /** - * Constructs a new WaiterTimedOutException with the specified error - * message. - * - * @param message Describes the error encountered. 
- */ - public WaiterTimedOutException(String message) { +/** + * Exception indicating a connection is terminating + */ +@SdkInternalApi +final class Http2ConnectionTerminatingException extends RuntimeException { + + Http2ConnectionTerminatingException(String message) { super(message); } - } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java index 0b720f52170b..d02bf5d96a09 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPool.java @@ -15,9 +15,12 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; +import static java.util.stream.Collectors.toList; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_CONNECTION; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_INITIAL_WINDOW_SIZE; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.MAX_CONCURRENT_STREAMS; +import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; import io.netty.channel.Channel; @@ -42,14 +45,18 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import software.amazon.awssdk.annotations.SdkInternalApi; 
import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.Protocol; -import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; @@ -66,19 +73,19 @@ *

*/ @SdkInternalApi -public class Http2MultiplexedChannelPool implements ChannelPool { +public class Http2MultiplexedChannelPool implements SdkChannelPool { private static final Logger log = Logger.loggerFor(Http2MultiplexedChannelPool.class); /** * Reference to the {@link MultiplexedChannelRecord} on a channel. */ - private static final AttributeKey MULTIPLEXED_CHANNEL = AttributeKey.newInstance( - "software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool.MULTIPLEXED_CHANNEL"); + private static final AttributeKey MULTIPLEXED_CHANNEL = NettyUtils.getOrCreateAttributeKey( + "software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool.MULTIPLEXED_CHANNEL"); /** * Whether a parent channel has been released yet. This guards against double-releasing to the delegate connection pool. */ - private static final AttributeKey RELEASED = AttributeKey.newInstance( + private static final AttributeKey RELEASED = NettyUtils.getOrCreateAttributeKey( "software.amazon.awssdk.http.nio.netty.internal.http2.Http2MultiplexedChannelPool.RELEASED"); private final ChannelPool connectionPool; @@ -91,7 +98,9 @@ public class Http2MultiplexedChannelPool implements ChannelPool { /** * @param connectionPool Connection pool for parent channels (i.e. the socket channel). 
*/ - Http2MultiplexedChannelPool(ChannelPool connectionPool, EventLoopGroup eventLoopGroup, Duration idleConnectionTimeout) { + Http2MultiplexedChannelPool(ChannelPool connectionPool, + EventLoopGroup eventLoopGroup, + Duration idleConnectionTimeout) { this.connectionPool = connectionPool; this.eventLoopGroup = eventLoopGroup; this.connections = ConcurrentHashMap.newKeySet(); @@ -140,10 +149,10 @@ private void acquireStreamOnNewConnection(Promise promise) { Channel parentChannel = newConnectionAcquire.getNow(); try { - parentChannel.attr(ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); + parentChannel.attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); // When the protocol future is completed on the new connection, we're ready for new streams to be added to it. - parentChannel.attr(ChannelAttributeKey.PROTOCOL_FUTURE).get() + parentChannel.attr(PROTOCOL_FUTURE).get() .thenAccept(protocol -> acquireStreamOnFreshConnection(promise, parentChannel, protocol)) .exceptionally(throwable -> failAndCloseParent(promise, parentChannel, throwable)); } catch (Throwable e) { @@ -267,7 +276,7 @@ private boolean acquireStreamOnInitializedConnection(MultiplexedChannelRecord ch } Channel channel = acquirePromise.getNow(); - channel.attr(ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); + channel.attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).set(this); channel.attr(MULTIPLEXED_CHANNEL).set(channelRecord); promise.setSuccess(channel); @@ -415,6 +424,59 @@ private Future doClose() { return closeFinishedPromise; } + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + + CompletableFuture summedMetrics = new CompletableFuture<>(); + + List> channelMetrics = + connections.stream() + .map(MultiplexedChannelRecord::getMetrics) + .collect(toList()); + + accumulateMetrics(summedMetrics, channelMetrics); + + summedMetrics.whenComplete((m, t) -> { + if (t != null) { + 
result.completeExceptionally(t); + } else { + try { + metrics.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, Math.toIntExact(m.getAvailableStreams())); + result.complete(null); + } catch (Exception e) { + result.completeExceptionally(e); + } + } + }); + + return result; + } + + private void accumulateMetrics(CompletableFuture result, + List> channelMetrics) { + accumulateMetrics(result, channelMetrics, new MultiplexedChannelRecord.Metrics(), 0); + } + + private void accumulateMetrics(CompletableFuture result, + List> channelMetrics, + MultiplexedChannelRecord.Metrics resultAccumulator, + int index) { + if (index >= channelMetrics.size()) { + result.complete(resultAccumulator); + return; + } + + channelMetrics.get(index).whenComplete((m, t) -> { + if (t != null) { + result.completeExceptionally(t); + } else { + resultAccumulator.add(m); + accumulateMetrics(result, channelMetrics, resultAccumulator, index + 1); + } + }); + } + @Sharable private static final class ReleaseOnExceptionHandler extends ChannelDuplexHandler { private static final ReleaseOnExceptionHandler INSTANCE = new ReleaseOnExceptionHandler(); @@ -426,7 +488,7 @@ public void channelInactive(ChannelHandlerContext ctx) { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - if (cause instanceof Http2StreamExceptionHandler.Http2StreamIoException) { + if (cause instanceof Http2ConnectionTerminatingException) { closeConnectionToNewRequests(ctx, cause); } else { closeAndReleaseParent(ctx, cause); @@ -443,7 +505,7 @@ void closeConnectionToNewRequests(ChannelHandlerContext ctx, Throwable cause) { } private void closeAndReleaseParent(ChannelHandlerContext ctx, Throwable cause) { - Http2MultiplexedChannelPool pool = ctx.channel().attr(ChannelAttributeKey.HTTP2_MULTIPLEXED_CHANNEL_POOL).get(); + Http2MultiplexedChannelPool pool = ctx.channel().attr(HTTP2_MULTIPLEXED_CHANNEL_POOL).get(); pool.closeAndReleaseParent(ctx.channel(), cause); } } diff --git 
a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java index 46b9f9df5fdd..d1ff14628f0a 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandler.java @@ -19,7 +19,6 @@ import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.handler.codec.http2.Http2Stream; import io.netty.handler.timeout.TimeoutException; import java.io.IOException; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -31,7 +30,7 @@ @ChannelHandler.Sharable @SdkInternalApi public final class Http2StreamExceptionHandler extends ChannelInboundHandlerAdapter { - private static final Logger log = Logger.loggerFor(Http2Stream.class); + private static final Logger log = Logger.loggerFor(Http2StreamExceptionHandler.class); private static final Http2StreamExceptionHandler INSTANCE = new Http2StreamExceptionHandler(); private Http2StreamExceptionHandler() { @@ -46,8 +45,9 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { if (isIoError(cause) && ctx.channel().parent() != null) { Channel parent = ctx.channel().parent(); log.debug(() -> "An I/O error occurred on an Http2 stream, notifying the connection channel " + parent); - parent.pipeline().fireExceptionCaught(new Http2StreamIoException("An I/O error occurred on an associated Http2 " - + "stream")); + parent.pipeline().fireExceptionCaught(new Http2ConnectionTerminatingException("An I/O error occurred on an " + + "associated Http2 " + + "stream " + ctx.channel())); } ctx.fireExceptionCaught(cause); @@ 
-56,10 +56,4 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { private boolean isIoError(Throwable cause) { return cause instanceof TimeoutException || cause instanceof IOException; } - - static final class Http2StreamIoException extends IOException { - Http2StreamIoException(String message) { - super(message); - } - } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java index eecb2054b0c6..b4c55c0e96eb 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2ToHttpInboundAdapter.java @@ -16,11 +16,13 @@ package software.amazon.awssdk.http.nio.netty.internal.http2; import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultHttpContent; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http2.Http2DataFrame; import io.netty.handler.codec.http2.Http2Error; import io.netty.handler.codec.http2.Http2Exception; @@ -30,6 +32,8 @@ import io.netty.handler.codec.http2.HttpConversionUtil; import java.io.IOException; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.HttpStatusFamily; +import software.amazon.awssdk.utils.Logger; /** * Converts {@link Http2Frame}s to {@link HttpObject}s. 
Ignores the majority of {@link Http2Frame}s like PING @@ -37,6 +41,7 @@ */ @SdkInternalApi public class Http2ToHttpInboundAdapter extends SimpleChannelInboundHandler { + private static final Logger log = Logger.loggerFor(Http2ToHttpInboundAdapter.class); @Override protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) throws Exception { @@ -54,7 +59,22 @@ protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) throws } private void onHeadersRead(Http2HeadersFrame headersFrame, ChannelHandlerContext ctx) throws Http2Exception { - ctx.fireChannelRead(HttpConversionUtil.toHttpResponse(headersFrame.stream().id(), headersFrame.headers(), true)); + + HttpResponse httpResponse = HttpConversionUtil.toHttpResponse(headersFrame.stream().id(), headersFrame.headers(), true); + ctx.fireChannelRead(httpResponse); + + if (HttpStatusFamily.of(httpResponse.status().code()) == HttpStatusFamily.SERVER_ERROR) { + fireConnectionExceptionForServerError(ctx); + } + } + + private void fireConnectionExceptionForServerError(ChannelHandlerContext ctx) { + if (ctx.channel().parent() != null) { + Channel parent = ctx.channel().parent(); + log.debug(() -> "A 5xx server error occurred on an Http2 stream, notifying the connection channel " + ctx.channel()); + parent.pipeline().fireExceptionCaught(new Http2ConnectionTerminatingException("A 5xx server error occurred on an " + + "Http2 stream " + ctx.channel())); + } } private void onDataRead(Http2DataFrame dataFrame, ChannelHandlerContext ctx) throws Http2Exception { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java index 4d944d3c4e53..e10a612689ec 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java +++ 
b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPool.java @@ -26,10 +26,14 @@ import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; import java.time.Duration; +import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.nio.netty.internal.IdleConnectionCountingChannelPool; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool; +import software.amazon.awssdk.metrics.MetricCollector; /** * Channel pool that establishes an initial connection to determine protocol. Delegates @@ -37,15 +41,16 @@ * all connections will be negotiated with the same protocol. */ @SdkInternalApi -public class HttpOrHttp2ChannelPool implements ChannelPool { +public class HttpOrHttp2ChannelPool implements SdkChannelPool { private final ChannelPool delegatePool; private final int maxConcurrency; private final EventLoopGroup eventLoopGroup; private final EventLoop eventLoop; private final NettyConfiguration configuration; + private boolean protocolImplPromiseInitializationStarted = false; private Promise protocolImplPromise; - private ChannelPool protocolImpl; + private BetterFixedChannelPool protocolImpl; private boolean closed; public HttpOrHttp2ChannelPool(ChannelPool delegatePool, @@ -57,6 +62,7 @@ public HttpOrHttp2ChannelPool(ChannelPool delegatePool, this.eventLoopGroup = group; this.eventLoop = group.next(); this.configuration = configuration; + this.protocolImplPromise = eventLoop.newPromise(); } @Override @@ -80,7 +86,7 @@ private void acquire0(Promise promise) { protocolImpl.acquire(promise); return; } - if (protocolImplPromise == null) { + if 
(!protocolImplPromiseInitializationStarted) { initializeProtocol(); } protocolImplPromise.addListener((GenericFutureListener>) future -> { @@ -98,7 +104,7 @@ private void acquire0(Promise promise) { * for {@link #protocolImpl}. */ private void initializeProtocol() { - protocolImplPromise = eventLoop.newPromise(); + protocolImplPromiseInitializationStarted = true; delegatePool.acquire().addListener((GenericFutureListener>) future -> { if (future.isSuccess()) { Channel newChannel = future.getNow(); @@ -123,7 +129,8 @@ private void initializeProtocol() { private void failProtocolImplPromise(Throwable e) { doInEventLoop(eventLoop, () -> { protocolImplPromise.setFailure(e); - protocolImplPromise = null; + protocolImplPromise = eventLoop.newPromise(); + protocolImplPromiseInitializationStarted = false; }); } @@ -150,8 +157,9 @@ private void closeAndRelease(Channel newChannel, Throwable e) { private ChannelPool configureProtocol(Channel newChannel, Protocol protocol) { if (Protocol.HTTP1_1 == protocol) { // For HTTP/1.1 we use a traditional channel pool without multiplexing + SdkChannelPool idleConnectionMetricChannelPool = new IdleConnectionCountingChannelPool(eventLoop, delegatePool); protocolImpl = BetterFixedChannelPool.builder() - .channelPool(delegatePool) + .channelPool(idleConnectionMetricChannelPool) .executor(eventLoop) .acquireTimeoutAction(BetterFixedChannelPool.AcquireTimeoutAction.FAIL) .acquireTimeoutMillis(configuration.connectionAcquireTimeoutMillis()) @@ -161,7 +169,7 @@ private ChannelPool configureProtocol(Channel newChannel, Protocol protocol) { } else { Duration idleConnectionTimeout = configuration.reapIdleConnections() ? 
Duration.ofMillis(configuration.idleTimeoutMillis()) : null; - ChannelPool h2Pool = new Http2MultiplexedChannelPool(delegatePool, eventLoopGroup, idleConnectionTimeout); + SdkChannelPool h2Pool = new Http2MultiplexedChannelPool(delegatePool, eventLoopGroup, idleConnectionTimeout); protocolImpl = BetterFixedChannelPool.builder() .channelPool(h2Pool) .executor(eventLoop) @@ -212,7 +220,7 @@ private void close0() { closed = true; if (protocolImpl != null) { protocolImpl.close(); - } else if (protocolImplPromise != null) { + } else if (protocolImplPromiseInitializationStarted) { protocolImplPromise.addListener((Future f) -> { if (f.isSuccess()) { f.getNow().close(); @@ -224,4 +232,23 @@ private void close0() { delegatePool.close(); } } + + @Override + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture result = new CompletableFuture<>(); + protocolImplPromise.addListener(f -> { + if (!f.isSuccess()) { + result.completeExceptionally(f.cause()); + } else { + protocolImpl.collectChannelPoolMetrics(metrics).whenComplete((m, t) -> { + if (t != null) { + result.completeExceptionally(t); + } else { + result.complete(m); + } + }); + } + }); + return result; + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java index 92e2009a7367..f1535be6cf63 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/http2/MultiplexedChannelRecord.java @@ -34,10 +34,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; import software.amazon.awssdk.http.nio.netty.internal.UnusedChannelExceptionHandler; import software.amazon.awssdk.utils.Logger; @@ -108,6 +110,7 @@ void acquireClaimedStream(Promise promise) { Http2StreamChannel channel = future.getNow(); channel.pipeline().addLast(UnusedChannelExceptionHandler.getInstance()); + channel.attr(ChannelAttributeKey.HTTP2_FRAME_STREAM).set(channel.stream()); childChannels.put(channel.id(), channel); promise.setSuccess(channel); @@ -296,6 +299,15 @@ boolean canBeClosedAndReleased() { return state != RecordState.OPEN && availableChildChannels.get() == maxConcurrencyPerConnection; } + CompletableFuture getMetrics() { + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(connection.eventLoop(), () -> { + int streamCount = childChannels.size(); + result.complete(new Metrics().setAvailableStreams(maxConcurrencyPerConnection - streamCount)); + }); + return result; + } + private enum RecordState { /** * The connection is open and new streams may be acquired from it, if they are available. 
@@ -313,4 +325,21 @@ private enum RecordState { */ CLOSED } + + public static class Metrics { + private long availableStreams = 0; + + public long getAvailableStreams() { + return availableStreams; + } + + public Metrics setAvailableStreams(long availableStreams) { + this.availableStreams = availableStreams; + return this; + } + + public void add(Metrics rhs) { + this.availableStreams += rhs.availableStreams; + } + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java index b22f2353a14e..dac4f081ec1b 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPool.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; +import static software.amazon.awssdk.http.nio.netty.internal.utils.NettyUtils.doInEventLoop; + import io.netty.channel.Channel; import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.DefaultPromise; @@ -28,21 +30,25 @@ import java.nio.channels.ClosedChannelException; import java.util.ArrayDeque; import java.util.Queue; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.metrics.MetricCollector; /** * {@link ChannelPool} implementation that takes another {@link ChannelPool} implementation and enforce a maximum * number of concurrent connections. 
*/ //TODO: Contribute me back to Netty -public class BetterFixedChannelPool implements ChannelPool { +public class BetterFixedChannelPool implements SdkChannelPool { private static final IllegalStateException FULL_EXCEPTION = ThrowableUtil.unknownStackTrace( new IllegalStateException("Too many outstanding acquire operations"), BetterFixedChannelPool.class, "acquire0(...)"); private static final TimeoutException TIMEOUT_EXCEPTION = ThrowableUtil.unknownStackTrace( - new TimeoutException("Acquire operation took longer then configured maximum time"), + new TimeoutException("Acquire operation took longer than configured maximum time"), BetterFixedChannelPool.class, "(...)"); static final IllegalStateException POOL_CLOSED_ON_RELEASE_EXCEPTION = ThrowableUtil.unknownStackTrace( new IllegalStateException("BetterFixedChannelPooled was closed"), @@ -66,7 +72,7 @@ public enum AcquireTimeoutAction { private final EventExecutor executor; private final long acquireTimeoutNanos; private final Runnable timeoutTask; - private final ChannelPool delegateChannelPool; + private final SdkChannelPool delegateChannelPool; // There is no need to worry about synchronization as everything that modified the queue or counts is done // by the above EventExecutor. 
@@ -145,6 +151,22 @@ public Future acquire(final Promise promise) { return promise; } + public CompletableFuture collectChannelPoolMetrics(MetricCollector metrics) { + CompletableFuture delegateMetricResult = delegateChannelPool.collectChannelPoolMetrics(metrics); + CompletableFuture result = new CompletableFuture<>(); + doInEventLoop(executor, () -> { + try { + metrics.reportMetric(HttpMetric.MAX_CONCURRENCY, this.maxConnections); + metrics.reportMetric(HttpMetric.PENDING_CONCURRENCY_ACQUIRES, this.pendingAcquireCount); + metrics.reportMetric(HttpMetric.LEASED_CONCURRENCY, this.acquiredChannelCount); + result.complete(null); + } catch (Throwable t) { + result.completeExceptionally(t); + } + }); + return CompletableFuture.allOf(result, delegateMetricResult); + } + private void acquire0(final Promise promise) { assert executor.inEventLoop(); @@ -376,7 +398,7 @@ public static Builder builder() { public static final class Builder { - private ChannelPool channelPool; + private SdkChannelPool channelPool; private EventExecutor executor; private AcquireTimeoutAction action; private long acquireTimeoutMillis; @@ -386,7 +408,7 @@ public static final class Builder { private Builder() { } - public Builder channelPool(ChannelPool channelPool) { + public Builder channelPool(SdkChannelPool channelPool) { this.channelPool = channelPool; return this; } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java index 4d0b20b4a128..584bdc635f8e 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtils.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.http.nio.netty.internal.utils; import io.netty.channel.EventLoop; 
+import io.netty.util.AttributeKey; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; @@ -160,4 +161,16 @@ public static void warnIfNotInEventLoop(EventLoop loop) { log.warn(() -> "Execution is happening outside of the expected event loop.", exception); } } + + /** + * @return an {@code AttributeKey} for {@code attr}. This returns an existing instance if it was previously created. + */ + public static AttributeKey getOrCreateAttributeKey(String attr) { + if (AttributeKey.exists(attr)) { + return AttributeKey.valueOf(attr); + } + //CHECKSTYLE:OFF - This is the only place allowed to call AttributeKey.newInstance() + return AttributeKey.newInstance(attr); + //CHECKSTYLE:ON + } } diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java index 9e06a565e8e2..2a27c2dd32f0 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/utils/OrderedWriteChannelHandlerContext.java @@ -31,7 +31,7 @@ @SdkInternalApi public class OrderedWriteChannelHandlerContext extends DelegatingChannelHandlerContext { private static final AttributeKey ORDERED = - AttributeKey.newInstance("aws.http.nio.netty.async.OrderedWriteChannelHandlerContext.ORDERED"); + NettyUtils.getOrCreateAttributeKey("aws.http.nio.netty.async.OrderedWriteChannelHandlerContext.ORDERED"); private OrderedWriteChannelHandlerContext(ChannelHandlerContext delegate) { super(delegate); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java new file mode 100644 index 000000000000..fe79f1e51b34 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/Http2MetricsTest.java @@ -0,0 +1,213 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2HeadersFrame; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.codec.http2.Http2StreamFrame; +import io.netty.util.ReferenceCountUtil; +import 
java.net.URI; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.http.EmptyPublisher; +import software.amazon.awssdk.http.Http2Metric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; + +public class Http2MetricsTest { + private static final int H2_DEFAULT_WINDOW_SIZE = 65535; + private static final int SERVER_MAX_CONCURRENT_STREAMS = 2; + private static final int SERVER_INITIAL_WINDOW_SIZE = 65535 * 2; + + private static final TestHttp2Server SERVER = new TestHttp2Server(); + + @BeforeClass + public static void setup() throws InterruptedException { + SERVER.start(); + } + + @AfterClass + public static void teardown() throws InterruptedException { + SERVER.stop(); + } + + @Test + public void maxClientStreamsLowerThanServerMaxStreamsReportClientMaxStreams() { + try (SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .maxConcurrency(10) + .http2Configuration(c -> c.maxStreams(1L) + .initialWindowSize(65535 * 3)) + .build()) { + MetricCollector metricCollector = MetricCollector.create("test"); + client.execute(createExecuteRequest(metricCollector)).join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + 
assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + // The stream window doesn't get initialized with the connection + // initial setting and the update appears to be asynchronous so + // this may be the default window size just based on when the + // stream window was queried or if this is the first time the + // stream is used (i.e. not previously pooled) + assertThat(metrics.metricValues(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES).get(0)).isIn(H2_DEFAULT_WINDOW_SIZE, 65535 * 3); + assertThat(metrics.metricValues(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES)).containsExactly(SERVER_INITIAL_WINDOW_SIZE); + } + } + + @Test + public void maxClientStreamsHigherThanServerMaxStreamsReportServerMaxStreams() { + try (SdkAsyncHttpClient client = NettyNioAsyncHttpClient.builder() + .protocol(Protocol.HTTP2) + .maxConcurrency(10) + .http2Configuration(c -> c.maxStreams(3L) + .initialWindowSize(65535 * 3)) + .build()) { + MetricCollector metricCollector = MetricCollector.create("test"); + client.execute(createExecuteRequest(metricCollector)).join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isIn(0, 2, 3); + // The stream window doesn't get initialized with the connection + // initial setting and the update appears to be asynchronous so + // this may be the default window size just based on when the + // stream window was queried or if this is the first time the + // stream is used 
(i.e. not previously pooled) + assertThat(metrics.metricValues(Http2Metric.LOCAL_STREAM_WINDOW_SIZE_IN_BYTES).get(0)).isIn(H2_DEFAULT_WINDOW_SIZE, 65535 * 3); + assertThat(metrics.metricValues(Http2Metric.REMOTE_STREAM_WINDOW_SIZE_IN_BYTES)).containsExactly(SERVER_INITIAL_WINDOW_SIZE); + } + } + + private AsyncExecuteRequest createExecuteRequest(MetricCollector metricCollector) { + URI uri = URI.create("http://localhost:" + SERVER.port()); + SdkHttpRequest request = createRequest(uri); + return AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(new EmptyPublisher()) + .responseHandler(new RecordingResponseHandler()) + .metricCollector(metricCollector) + .build(); + } + + private SdkHttpFullRequest createRequest(URI uri) { + return SdkHttpFullRequest.builder() + .uri(uri) + .method(SdkHttpMethod.GET) + .encodedPath("/") + .putHeader("Host", uri.getHost()) + .putHeader("Content-Length", "0") + .build(); + } + + private static final class TestHttp2Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel channel; + + private TestHttp2Server() { + } + + public void start() throws InterruptedException { + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(new NioEventLoopGroup()) + .childHandler(this) + .localAddress(0) + .childOption(ChannelOption.SO_KEEPALIVE, true); + + channel = ((ServerSocketChannel) bootstrap.bind().await().channel()); + } + + public int port() { + return channel.localAddress().getPort(); + } + + public void stop() throws InterruptedException { + channel.close().await(); + } + + @Override + protected void initChannel(SocketChannel ch) { + Http2FrameCodec codec = Http2FrameCodecBuilder.forServer() + .initialSettings(new Http2Settings() + .maxConcurrentStreams(SERVER_MAX_CONCURRENT_STREAMS) + .initialWindowSize(SERVER_INITIAL_WINDOW_SIZE)) + .build(); + ch.pipeline().addLast(codec); + ch.pipeline().addLast(new SuccessfulHandler()); + } + } + + 
private static class SuccessfulHandler extends ChannelInboundHandlerAdapter { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (!(msg instanceof Http2Frame)) { + ctx.fireChannelRead(msg); + return; + } + ReferenceCountUtil.release(msg); + + boolean isEnd = isEndFrame(msg); + if (isEnd) { + ctx.writeAndFlush(new DefaultHttp2HeadersFrame(new DefaultHttp2Headers().status("204"), true) + .stream(((Http2StreamFrame) msg).stream())); + } + } + + private boolean isEndFrame(Object msg) { + if (msg instanceof Http2HeadersFrame) { + return ((Http2HeadersFrame) msg).isEndStream(); + } + + if (msg instanceof Http2DataFrame) { + return ((Http2DataFrame) msg).isEndStream(); + } + + return false; + } + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java index b9533c8527cd..936a056425f5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyClientTlsAuthTest.java @@ -31,6 +31,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.FileStoreTlsKeyManagersProvider; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java index 9b992cc90918..a4e4047fde13 100644 --- 
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientSpiVerificationTest.java @@ -41,6 +41,7 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java index 23a887321616..ff02542da2d7 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClientWireMockTest.java @@ -49,7 +49,6 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.ssl.SslProvider; import io.netty.util.AttributeKey; @@ -81,6 +80,7 @@ import org.mockito.stubbing.Answer; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; +import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.HttpTestUtils; import software.amazon.awssdk.http.SdkHttpConfigurationOption; import software.amazon.awssdk.http.SdkHttpFullRequest; @@ -91,6 +91,8 @@ import software.amazon.awssdk.http.async.SdkHttpContentPublisher; import 
software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPoolMap; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.utils.AttributeMap; @RunWith(MockitoJUnitRunner.class) @@ -253,10 +255,10 @@ public void customChannelFactoryIsUsed() throws Exception { @Test public void closeClient_shouldCloseUnderlyingResources() { SdkEventLoopGroup eventLoopGroup = SdkEventLoopGroup.builder().build(); - ChannelPool channelPool = mock(ChannelPool.class); - SdkChannelPoolMap sdkChannelPoolMap = new SdkChannelPoolMap() { + SdkChannelPool channelPool = mock(SdkChannelPool.class); + SdkChannelPoolMap sdkChannelPoolMap = new SdkChannelPoolMap() { @Override - protected ChannelPool newPool(URI key) { + protected SdkChannelPool newPool(URI key) { return channelPool; } }; @@ -636,7 +638,7 @@ public void testExceptionMessageChanged_WhenPendingAcquireQueueIsFull() throws E List> futures = new ArrayList<>(); for (int i = 0; i < 10; i++) { - futures.add(makeSimpleRequestAndReturnResponseHandler(customClient).completeFuture); + futures.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000).completeFuture); } assertThatThrownBy(() -> CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join()) @@ -659,7 +661,7 @@ public void testExceptionMessageChanged_WhenConnectionTimeoutErrorEncountered() List> futures = new ArrayList<>(); for (int i = 0; i < 2; i++) { - futures.add(makeSimpleRequestAndReturnResponseHandler(customClient).completeFuture); + futures.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000).completeFuture); } assertThatThrownBy(() -> CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join()) @@ -680,22 +682,102 @@ public void createNettyClient_ReadWriteTimeoutCanBeZero() throws Exception { customClient.close(); } + @Test + public void 
metricsAreCollectedWhenMaxPendingConnectionAcquisitionsAreExceeded() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(1) + .maxPendingConnectionAcquires(1) + .build(); + + List handlers = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + handlers.add(makeSimpleRequestAndReturnResponseHandler(customClient, 1000)); + } + + for (RecordingResponseHandler handler : handlers) { + try { + handler.executionFuture.join(); + } catch (Exception e) { + // Ignored. + } + + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(1); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).allSatisfy(a -> assertThat(a).isBetween(0, 9)); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).allSatisfy(a -> assertThat(a).isBetween(0, 1)); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).allSatisfy(a -> assertThat(a).isBetween(0, 1)); + } + + customClient.close(); + } + + @Test + public void metricsAreCollectedForSuccessfulCalls() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(10) + .build(); + + RecordingResponseHandler handler = makeSimpleRequestAndReturnResponseHandler(customClient); + + handler.executionFuture.get(10, TimeUnit.SECONDS); + + Thread.sleep(5_000); + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + 
assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + + customClient.close(); + } + + @Test + public void metricsAreCollectedForClosedClientCalls() throws Exception { + SdkAsyncHttpClient customClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(10) + .build(); + customClient.close(); + + RecordingResponseHandler handler = makeSimpleRequestAndReturnResponseHandler(customClient); + + try { + handler.executionFuture.get(10, TimeUnit.SECONDS); + } catch (Exception e) { + // Expected + } + + MetricCollection metrics = handler.collector.collect(); + assertThat(metrics.metricValues(HttpMetric.HTTP_CLIENT_NAME)).containsExactly("NettyNio"); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(10); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).containsExactly(0); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).containsExactly(0); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + } + private void verifyChannelRelease(Channel channel) throws InterruptedException { Thread.sleep(1000); assertThat(channel.attr(AttributeKey.valueOf("channelPool")).get()).isNull(); } private RecordingResponseHandler makeSimpleRequestAndReturnResponseHandler(SdkAsyncHttpClient client) throws Exception { + return makeSimpleRequestAndReturnResponseHandler(client, null); + } + + private RecordingResponseHandler makeSimpleRequestAndReturnResponseHandler(SdkAsyncHttpClient client, Integer delayInMillis) + throws Exception { String body = randomAlphabetic(10); URI uri = URI.create("http://localhost:" + mockServer.port()); - stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body).withFixedDelay(1000))); + stubFor(any(urlPathEqualTo("/")).willReturn(aResponse().withBody(body).withFixedDelay(delayInMillis))); SdkHttpRequest request = createRequest(uri); RecordingResponseHandler recorder = new RecordingResponseHandler(); - 
client.execute(AsyncExecuteRequest.builder() - .request(request) - .requestContentPublisher(createProvider("")) - .responseHandler(recorder) - .build()); + recorder.executionFuture = client.execute(AsyncExecuteRequest.builder() + .request(request) + .requestContentPublisher(createProvider("")) + .responseHandler(recorder) + .metricCollector(recorder.collector) + .build()); return recorder; } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java index 9a0b45094eec..f797a760fdf7 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/ProxyWireMockTest.java @@ -31,6 +31,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpMethod; import software.amazon.awssdk.http.async.AsyncExecuteRequest; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java index 52989a2bb966..bfbee3bc57b5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/RecordingResponseHandler.java @@ -30,12 +30,15 @@ import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; import software.amazon.awssdk.http.async.SdkHttpResponseHandler; import software.amazon.awssdk.http.async.SimpleSubscriber; +import software.amazon.awssdk.metrics.MetricCollector; public final 
class RecordingResponseHandler implements SdkAsyncHttpResponseHandler { List responses = new ArrayList<>(); private StringBuilder bodyParts = new StringBuilder(); CompletableFuture completeFuture = new CompletableFuture<>(); + CompletableFuture executionFuture = null; + MetricCollector collector = MetricCollector.create("test"); @Override public void onHeaders(SdkHttpResponse response) { diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java index f46480dc29a2..957dcaa7fc71 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/GoAwayTest.java @@ -65,7 +65,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import software.amazon.awssdk.http.nio.netty.internal.http2.GoAwayException; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java new file mode 100644 index 000000000000..099ce9378cba --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H1ServerErrorTest.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; + +import software.amazon.awssdk.http.H1ServerBehaviorTestBase; + + +/** + * Testing the scenario where h1 server sends 5xx errors. 
+ */ +public class H1ServerErrorTest extends H1ServerBehaviorTestBase { + private SdkAsyncHttpClient netty; + + @Override + protected SdkAsyncHttpClient getTestClient() { return netty; } + + @Before + public void setup() throws Exception { + super.setup(); + + netty = NettyNioAsyncHttpClient.builder() + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(2).build()) + .protocol(Protocol.HTTP1_1) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + + + @After + public void teardown() throws InterruptedException { + super.teardown(); + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void connectionReceiveServerErrorStatusShouldNotReuseConnection() { + assertThat(netty).isNotNull(); + super.connectionReceiveServerErrorStatusShouldNotReuseConnection(); + } + + @Test + public void connectionReceiveOkStatusShouldReuseConnection() { + assertThat(netty).isNotNull(); + super.connectionReceiveOkStatusShouldReuseConnection(); + } +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java new file mode 100644 index 000000000000..bf22b813b15e --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/H2ServerErrorTest.java @@ -0,0 +1,192 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.fault; + +import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES; +import static software.amazon.awssdk.http.HttpTestUtils.sendGetRequest; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http2.DefaultHttp2DataFrame; +import io.netty.handler.codec.http2.DefaultHttp2Headers; +import io.netty.handler.codec.http2.DefaultHttp2HeadersFrame; +import io.netty.handler.codec.http2.Http2DataFrame; +import io.netty.handler.codec.http2.Http2Frame; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2Headers; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import io.netty.handler.codec.http2.Http2Settings; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import software.amazon.awssdk.http.Protocol; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import 
software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; +import software.amazon.awssdk.utils.AttributeMap; +import software.amazon.awssdk.utils.Logger; + +/** + * Testing the scenario where h2 server sends 5xx errors. + */ +public class H2ServerErrorTest { + private static final Logger LOGGER = Logger.loggerFor(ServerNotRespondingTest.class); + private SdkAsyncHttpClient netty; + private Server server; + + @Before + public void setup() throws Exception { + server = new Server(); + server.init(); + + netty = NettyNioAsyncHttpClient.builder() + .eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(3).build()) + .protocol(Protocol.HTTP2) + .buildWithDefaults(AttributeMap.builder().put(TRUST_ALL_CERTIFICATES, true).build()); + } + + @After + public void teardown() throws InterruptedException { + if (server != null) { + server.shutdown(); + } + server = null; + + if (netty != null) { + netty.close(); + } + netty = null; + } + + @Test + public void serviceReturn500_newRequestShouldUseNewConnection() { + server.return500OnFirstRequest = true; + CompletableFuture firstRequest = sendGetRequest(server.port(), netty); + firstRequest.join(); + + sendGetRequest(server.port(), netty).join(); + assertThat(server.h2ConnectionCount.get()).isEqualTo(2); + } + + @Test + public void serviceReturn200_newRequestShouldReuseNewConnection() { + server.return500OnFirstRequest = false; + CompletableFuture firstRequest = sendGetRequest(server.port(), netty); + firstRequest.join(); + + sendGetRequest(server.port(), netty).join(); + assertThat(server.h2ConnectionCount.get()).isEqualTo(1); + } + + private static class Server extends ChannelInitializer { + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private String[] channelIds = new String[5]; + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private boolean return500OnFirstRequest; + 
private AtomicInteger h2ConnectionCount = new AtomicInteger(0); + + void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new ServerBootstrap() + .channel(NioServerSocketChannel.class) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(Channel ch) { + channelIds[h2ConnectionCount.get()] = ch.id().asShortText(); + LOGGER.debug(() -> "init channel " + ch); + h2ConnectionCount.incrementAndGet(); + + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + + + Http2FrameCodec http2Codec = Http2FrameCodecBuilder.forServer() + .autoAckPingFrame(true) + .initialSettings(Http2Settings.defaultSettings().maxConcurrentStreams(1)) + .build(); + + Http2MultiplexHandler http2Handler = new Http2MultiplexHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast(new MightReturn500StreamFrameHandler()); + } + }); + + pipeline.addLast(http2Codec); + pipeline.addLast(http2Handler); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + serverSock.close(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + private class MightReturn500StreamFrameHandler extends SimpleChannelInboundHandler { + + @Override + protected void channelRead0(ChannelHandlerContext ctx, Http2Frame frame) { + if (frame instanceof Http2DataFrame) { + DefaultHttp2DataFrame dataFrame = new DefaultHttp2DataFrame(true); + + // returning 500 this is channel 1 + if (channelIds[0].equals(ctx.channel().parent().id().asShortText()) && return500OnFirstRequest) { + LOGGER.info(() -> "This is the first request, returning 500" + ctx.channel()); + Http2Headers headers = new 
DefaultHttp2Headers().status(INTERNAL_SERVER_ERROR.codeAsText()); + ctx.write(new DefaultHttp2HeadersFrame(headers, false)); + ctx.write(new DefaultHttp2DataFrame(true)); + ctx.flush(); + } else { + LOGGER.info(() -> "return empty data " + ctx.channel() + " frame " + frame.getClass()); + Http2Headers headers = new DefaultHttp2Headers().status(OK.codeAsText()); + ctx.write(new DefaultHttp2HeadersFrame(headers, false)); + ctx.write(dataFrame); + ctx.flush(); + } + } + } + } + } + +} diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java index a309addf27ff..f88c5af2bfcd 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/PingTimeoutTest.java @@ -67,7 +67,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.Http2Configuration; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.internal.http2.PingFailedException; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java index 88e0c50ff43a..cc6fbda166b5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerCloseConnectionTest.java @@ -63,7 +63,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import software.amazon.awssdk.utils.AttributeMap; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java index 92d3624febf1..88eb36716106 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/fault/ServerNotRespondingTest.java @@ -65,7 +65,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import software.amazon.awssdk.utils.AttributeMap; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java index 235223a5d5be..16b320167cc0 100644 --- 
a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/CancellableAcquireChannelPoolTest.java @@ -18,7 +18,6 @@ import io.netty.channel.Channel; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.Promise; @@ -47,7 +46,7 @@ public class CancellableAcquireChannelPoolTest { private EventExecutor eventExecutor; @Mock - private ChannelPool mockDelegatePool; + private SdkChannelPool mockDelegatePool; private Channel channel; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java index 33fb9d9b906b..0ce25a0f6ebb 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ConnectionReaperTest.java @@ -43,7 +43,7 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.RecordingResponseHandler; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java index d28cf1558cd5..b80cd8e809d0 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/FutureCancelHandlerTest.java @@ -23,9 +23,7 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.DefaultEventLoopGroup; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.util.DefaultAttributeMap; import java.io.IOException; import java.util.concurrent.CancellationException; @@ -53,7 +51,7 @@ public class FutureCancelHandlerTest { private Channel channel; @Mock - private ChannelPool channelPool; + private SdkChannelPool channelPool; private RequestContext requestContext; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java index 5e6213972598..cdbf063c9adc 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HandlerRemovingChannelPoolTest.java @@ -25,7 +25,6 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelPipeline; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.timeout.ReadTimeoutHandler; @@ -42,7 +41,7 @@ @RunWith(MockitoJUnitRunner.class) public class HandlerRemovingChannelPoolTest { @Mock - private ChannelPool 
channelPool; + private SdkChannelPool channelPool; @Mock private SdkAsyncHttpResponseHandler responseHandler; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java index 66a84fafa57e..e610884e3eff 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/HealthCheckedChannelPoolTest.java @@ -28,7 +28,6 @@ import io.netty.channel.Channel; import io.netty.channel.EventLoop; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.util.Attribute; import io.netty.util.concurrent.DefaultPromise; import io.netty.util.concurrent.Future; @@ -53,7 +52,7 @@ public class HealthCheckedChannelPoolTest { private EventLoopGroup eventLoopGroup = Mockito.mock(EventLoopGroup.class); private EventLoop eventLoop = Mockito.mock(EventLoop.class); - private ChannelPool downstreamChannelPool = Mockito.mock(ChannelPool.class); + private SdkChannelPool downstreamChannelPool = Mockito.mock(SdkChannelPool.class); private List channels = new ArrayList<>(); private ScheduledFuture scheduledFuture = Mockito.mock(ScheduledFuture.class); private Attribute attribute = mock(Attribute.class); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java new file mode 100644 index 000000000000..a6d04b368be7 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/IdleConnectionCountingChannelPoolTest.java @@ 
-0,0 +1,210 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.http.nio.netty.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoop; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.pool.ChannelPool; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollector; + +public class IdleConnectionCountingChannelPoolTest { + private EventLoopGroup eventLoopGroup; + private ChannelPool delegatePool; + private IdleConnectionCountingChannelPool idleCountingPool; + + @Before + public void setup() { + delegatePool = mock(ChannelPool.class); + eventLoopGroup = new NioEventLoopGroup(4); + idleCountingPool = new IdleConnectionCountingChannelPool(eventLoopGroup.next(), delegatePool); + } + + @After + public void teardown() { + eventLoopGroup.shutdownGracefully(); + } + + @Test(timeout = 5_000) + public void acquiresAndReleasesOfNewChannelsIncreaseCount() throws InterruptedException { + 
stubDelegatePoolAcquires(createSuccessfulAcquire(), createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel firstChannel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel secondChannel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(firstChannel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + idleCountingPool.release(secondChannel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(2); + } + + @Test(timeout = 5_000) + public void channelsClosedInTheDelegatePoolAreNotCounted() throws InterruptedException { + stubDelegatePoolAcquires(createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + + @Test(timeout = 5_000) + public void channelsClosedWhenCheckedOutAreNotCounted() throws InterruptedException { + stubDelegatePoolAcquires(createSuccessfulAcquire()); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + + @Test + public void checkingOutAnIdleChannelIsCountedCorrectly() throws InterruptedException { + Future successfulAcquire = createSuccessfulAcquire(); + stubDelegatePoolAcquires(successfulAcquire, 
successfulAcquire); + stubDelegatePoolReleasesForSuccess(); + + assertThat(getIdleConnectionCount()).isEqualTo(0); + + Channel channel1 = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + + idleCountingPool.release(channel1).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + + Channel channel2 = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + assertThat(channel1).isEqualTo(channel2); + } + + @Test + public void stochastic_rapidAcquireReleaseIsCalculatedCorrectly() throws InterruptedException { + Future successfulAcquire = createSuccessfulAcquire(); + Channel expectedChannel = successfulAcquire.getNow(); + stubDelegatePoolAcquires(successfulAcquire); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(channel).isEqualTo(expectedChannel); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + } + } + + @Test + public void stochastic_rapidAcquireReleaseCloseIsCalculatedCorrectly() throws InterruptedException { + stubDelegatePoolAcquiresForSuccess(); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(1); + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + } + + @Test + public void stochastic_rapidAcquireCloseReleaseIsCalculatedCorrectly() throws InterruptedException { + stubDelegatePoolAcquiresForSuccess(); + stubDelegatePoolReleasesForSuccess(); + + for (int i = 0; i < 1000; ++i) { + Channel channel = idleCountingPool.acquire().await().getNow(); + 
assertThat(getIdleConnectionCount()).isEqualTo(0); + channel.close().await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + idleCountingPool.release(channel).await(); + assertThat(getIdleConnectionCount()).isEqualTo(0); + } + } + + private int getIdleConnectionCount() { + MetricCollector metricCollector = MetricCollector.create("test"); + idleCountingPool.collectChannelPoolMetrics(metricCollector).join(); + return metricCollector.collect().metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0); + } + + @SafeVarargs + private final void stubDelegatePoolAcquires(Future result, Future... extraResults) { + Mockito.when(delegatePool.acquire(any())).thenReturn(result, extraResults); + } + + private void stubDelegatePoolAcquiresForSuccess() { + Mockito.when(delegatePool.acquire(any())).thenAnswer(a -> createSuccessfulAcquire()); + } + + private void stubDelegatePoolReleasesForSuccess() { + Mockito.when(delegatePool.release(any())).thenAnswer((Answer>) invocation -> { + Channel channel = invocation.getArgumentAt(0, Channel.class); + Promise result = channel.eventLoop().newPromise(); + return result.setSuccess(channel); + }); + } + + private Future createSuccessfulAcquire() { + try { + EventLoop eventLoop = this.eventLoopGroup.next(); + + Promise channelPromise = eventLoop.newPromise(); + MockChannel channel = new MockChannel(); + eventLoop.register(channel); + channelPromise.setSuccess(channel); + + return channelPromise; + } catch (Exception e) { + throw new Error(e); + } + } +} \ No newline at end of file diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java index be7093195e55..c4a915991db0 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/NettyRequestExecutorTest.java @@ -25,7 +25,6 @@ import io.netty.channel.EventLoop; import io.netty.channel.EventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.pool.ChannelPool; import io.netty.util.concurrent.Promise; import java.util.concurrent.CompletableFuture; import org.junit.After; @@ -38,7 +37,7 @@ public class NettyRequestExecutorTest { - private ChannelPool mockChannelPool; + private SdkChannelPool mockChannelPool; private EventLoopGroup eventLoopGroup; @@ -48,7 +47,7 @@ public class NettyRequestExecutorTest { @Before public void setup() { - mockChannelPool = mock(ChannelPool.class); + mockChannelPool = mock(SdkChannelPool.class); eventLoopGroup = new NioEventLoopGroup(); diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java index 068e870e3209..bed4a8063a65 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/PublisherAdapterTest.java @@ -25,22 +25,16 @@ import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import static software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.REQUEST_CONTEXT_KEY; -import software.amazon.awssdk.http.nio.netty.internal.nrs.DefaultStreamedHttpResponse; -import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpResponse; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.EmptyByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.EventLoopGroup; -import io.netty.channel.pool.ChannelPool; -import 
io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.DefaultHttpContent; import io.netty.handler.codec.http.DefaultHttpResponse; import io.netty.handler.codec.http.EmptyHttpHeaders; import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; -import io.netty.util.AttributeKey; import io.reactivex.Flowable; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; @@ -54,6 +48,8 @@ import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.http.nio.netty.internal.nrs.DefaultStreamedHttpResponse; +import software.amazon.awssdk.http.nio.netty.internal.nrs.StreamedHttpResponse; @RunWith(MockitoJUnitRunner.class) public class PublisherAdapterTest { @@ -64,7 +60,7 @@ public class PublisherAdapterTest { private MockChannel channel; @Mock - private ChannelPool channelPool; + private SdkChannelPool channelPool; @Mock private EventLoopGroup eventLoopGroup; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java index 0f6f786eb9d0..56601cf8bdb3 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ResponseCompletionTest.java @@ -60,7 +60,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import 
software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; import software.amazon.awssdk.utils.AttributeMap; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java index ca40d14cb6e0..fe5ae0948dc1 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2MultiplexedChannelPoolTest.java @@ -31,7 +31,6 @@ import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.codec.http2.Http2Connection; -import io.netty.handler.codec.http2.Http2FrameCodec; import io.netty.handler.codec.http2.Http2LocalFlowController; import io.netty.handler.codec.http2.Http2Stream; import io.netty.util.concurrent.DefaultPromise; @@ -41,14 +40,16 @@ import java.io.IOException; import java.util.Collections; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import org.mockito.Mockito; +import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; /** * Tests for {@link Http2MultiplexedChannelPool}. 
@@ -258,4 +259,92 @@ public void acquire_shouldExpandConnectionWindowSizeProportionally() { channel.close(); } } + + @Test + public void metricsShouldSumAllChildChannels() throws InterruptedException { + int maxConcurrentStream = 2; + EmbeddedChannel channel1 = newHttp2Channel(); + EmbeddedChannel channel2 = newHttp2Channel(); + channel1.attr(ChannelAttributeKey.MAX_CONCURRENT_STREAMS).set((long) maxConcurrentStream); + channel2.attr(ChannelAttributeKey.MAX_CONCURRENT_STREAMS).set((long) maxConcurrentStream); + + try { + ChannelPool connectionPool = Mockito.mock(ChannelPool.class); + + loopGroup.register(channel1).awaitUninterruptibly(); + loopGroup.register(channel2).awaitUninterruptibly(); + Promise channel1Promise = new DefaultPromise<>(loopGroup.next()); + Promise channel2Promise = new DefaultPromise<>(loopGroup.next()); + channel1Promise.setSuccess(channel1); + channel2Promise.setSuccess(channel2); + + Mockito.when(connectionPool.acquire()).thenReturn(channel1Promise, channel2Promise); + + Http2MultiplexedChannelPool h2Pool = new Http2MultiplexedChannelPool(connectionPool, + Http2MultiplexedChannelPoolTest.loopGroup, + Collections.emptySet(), null); + MetricCollection metrics; + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + Channel lastAcquire = doAcquire(channel1, channel2, h2Pool); + + metrics = getMetrics(h2Pool); + 
assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + + lastAcquire.close(); + h2Pool.release(lastAcquire).awaitUninterruptibly(); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + channel1.close(); + h2Pool.release(channel1); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(1); + + channel2.close(); + + metrics = getMetrics(h2Pool); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY)).containsExactly(0); + } finally { + channel1.close(); + channel2.close(); + } + } + + private Channel doAcquire(EmbeddedChannel channel1, EmbeddedChannel channel2, Http2MultiplexedChannelPool h2Pool) { + Future acquire = h2Pool.acquire(); + acquire.awaitUninterruptibly(); + runPendingTasks(channel1, channel2); + return acquire.getNow(); + } + + private void runPendingTasks(EmbeddedChannel channel1, EmbeddedChannel channel2) { + channel1.runPendingTasks(); + channel2.runPendingTasks(); + } + + private MetricCollection getMetrics(Http2MultiplexedChannelPool h2Pool) { + MetricCollector metricCollector = MetricCollector.create("test"); + h2Pool.collectChannelPoolMetrics(metricCollector); + return metricCollector.collect(); + } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java index 6eac08f8269d..ec1519a2b17e 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/Http2StreamExceptionHandlerTest.java @@ -85,7 +85,7 @@ public void 
timeoutException_shouldFireExceptionAndPropagateException() { when(streamChannel.parent()).thenReturn(embeddedParentChannel); handler.exceptionCaught(context, ReadTimeoutException.INSTANCE); - assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2StreamExceptionHandler.Http2StreamIoException.class); + assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2ConnectionTerminatingException.class); verify(context).fireExceptionCaught(ReadTimeoutException.INSTANCE); } @@ -95,7 +95,7 @@ public void ioException_shouldFireExceptionAndPropagateException() { when(streamChannel.parent()).thenReturn(embeddedParentChannel); handler.exceptionCaught(context, ioException); - assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2StreamExceptionHandler.Http2StreamIoException.class); + assertThat(verifyExceptionHandler.exceptionCaught).isExactlyInstanceOf(Http2ConnectionTerminatingException.class); verify(context).fireExceptionCaught(ioException); } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java index bd2728465c76..170bce1e17f6 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/HttpOrHttp2ChannelPoolTest.java @@ -21,6 +21,7 @@ import static org.mockito.Mockito.when; import static software.amazon.awssdk.http.SdkHttpConfigurationOption.CONNECTION_ACQUIRE_TIMEOUT; import static software.amazon.awssdk.http.SdkHttpConfigurationOption.MAX_PENDING_CONNECTION_ACQUIRES; +import static software.amazon.awssdk.http.SdkHttpConfigurationOption.REAP_IDLE_CONNECTIONS; import static 
software.amazon.awssdk.http.nio.netty.internal.ChannelAttributeKey.PROTOCOL_FUTURE; import io.netty.channel.Channel; @@ -39,9 +40,12 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.nio.netty.internal.MockChannel; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.utils.AttributeMap; /** @@ -74,6 +78,7 @@ public void methodSetup() { new NettyConfiguration(AttributeMap.builder() .put(CONNECTION_ACQUIRE_TIMEOUT, Duration.ofSeconds(1)) .put(MAX_PENDING_CONNECTION_ACQUIRES, 5) + .put(REAP_IDLE_CONNECTIONS, false) .build())); } @@ -202,4 +207,64 @@ public void protocolConfigComplete_poolClosed_closesDelegatePool() throws Interr channel.close(); } } + + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsFailures() throws InterruptedException { + Promise acquirePromise = eventLoopGroup.next().newPromise(); + when(mockDelegatePool.acquire()).thenReturn(acquirePromise); + + // startConnection + httpOrHttp2ChannelPool.acquire(); + + // query for metrics before the config can complete (we haven't completed acquirePromise yet) + CompletableFuture metrics = httpOrHttp2ChannelPool.collectChannelPoolMetrics(MetricCollector.create("test")); + + Thread.sleep(500); + + assertThat(metrics.isDone()).isFalse(); + acquirePromise.setFailure(new RuntimeException("Some failure")); + + Thread.sleep(500); + + assertThat(metrics.isCompletedExceptionally()).isTrue(); + } + + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForHttp1() throws Exception { + incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol.HTTP1_1); + } 
+ + @Test(timeout = 5_000) + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForHttp2() throws Exception { + incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol.HTTP2); + } + + public void incompleteProtocolFutureDelaysMetricsDelegationAndForwardsSuccessForProtocol(Protocol protocol) throws Exception { + Promise acquirePromise = eventLoopGroup.next().newPromise(); + when(mockDelegatePool.acquire()).thenReturn(acquirePromise); + + // startConnection + httpOrHttp2ChannelPool.acquire(); + + // query for metrics before the config can complete (we haven't completed acquirePromise yet) + MetricCollector metricCollector = MetricCollector.create("foo"); + CompletableFuture metricsFuture = httpOrHttp2ChannelPool.collectChannelPoolMetrics(metricCollector); + + Thread.sleep(500); + + assertThat(metricsFuture.isDone()).isFalse(); + + Channel channel = new MockChannel(); + eventLoopGroup.register(channel); + channel.attr(PROTOCOL_FUTURE).set(CompletableFuture.completedFuture(protocol)); + acquirePromise.setSuccess(channel); + + metricsFuture.join(); + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES).get(0)).isEqualTo(0); + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY).get(0)).isEqualTo(4); + assertThat(metrics.metricValues(HttpMetric.AVAILABLE_CONCURRENCY).get(0)).isBetween(0, 1); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY).get(0)).isBetween(0, 1); + } } diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java index 7210708d7b59..e33ddfcb6e17 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/http2/WindowSizeTest.java @@ -54,7 +54,7 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; -import software.amazon.awssdk.http.nio.netty.EmptyPublisher; +import software.amazon.awssdk.http.EmptyPublisher; import software.amazon.awssdk.http.nio.netty.Http2Configuration; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java new file mode 100644 index 000000000000..c429b2e7b882 --- /dev/null +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/BetterFixedChannelPoolTest.java @@ -0,0 +1,188 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http.nio.netty.internal.utils; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; + +import io.netty.channel.Channel; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.nio.netty.internal.MockChannel; +import software.amazon.awssdk.http.nio.netty.internal.SdkChannelPool; +import software.amazon.awssdk.http.nio.netty.internal.utils.BetterFixedChannelPool.AcquireTimeoutAction; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +public class BetterFixedChannelPoolTest { + private static EventLoopGroup eventLoopGroup; + + private BetterFixedChannelPool channelPool; + private SdkChannelPool delegatePool; + + @BeforeClass + public static void setupClass() { + eventLoopGroup = new NioEventLoopGroup(1); + } + + @AfterClass + public static void teardownClass() throws InterruptedException { + eventLoopGroup.shutdownGracefully().await(); + } + + @Before + public void setup() { + delegatePool = mock(SdkChannelPool.class); + + channelPool = BetterFixedChannelPool.builder() + .channelPool(delegatePool) + .maxConnections(2) + .maxPendingAcquires(2) + 
.acquireTimeoutAction(AcquireTimeoutAction.FAIL) + .acquireTimeoutMillis(10_000) + .executor(eventLoopGroup.next()) + .build(); + } + + @After + public void teardown() { + channelPool.close(); + } + + @Test + public void delegateChannelPoolMetricFailureIsReported() { + Throwable t = new Throwable(); + Mockito.when(delegatePool.collectChannelPoolMetrics(any())).thenReturn(CompletableFutureUtils.failedFuture(t)); + + CompletableFuture result = channelPool.collectChannelPoolMetrics(MetricCollector.create("test")); + waitForCompletion(result); + assertThat(result).hasFailedWithThrowableThat().isEqualTo(t); + } + + @Test(timeout = 5_000) + public void metricCollectionHasCorrectValuesAfterAcquiresAndReleases() throws Exception { + List> acquirePromises = Collections.synchronizedList(new ArrayList<>()); + Mockito.when(delegatePool.acquire(isA(Promise.class))).thenAnswer(i -> { + Promise promise = eventLoopGroup.next().newPromise(); + acquirePromises.add(promise); + return promise; + }); + + List> releasePromises = Collections.synchronizedList(new ArrayList<>()); + Mockito.when(delegatePool.release(isA(Channel.class), isA(Promise.class))).thenAnswer(i -> { + Promise promise = i.getArgumentAt(1, Promise.class); + releasePromises.add(promise); + return promise; + }); + + Mockito.when(delegatePool.collectChannelPoolMetrics(any())).thenReturn(CompletableFuture.completedFuture(null)); + + assertConnectionsCheckedOutAndPending(0, 0); + + channelPool.acquire(); + completePromise(acquirePromises, 0); + assertConnectionsCheckedOutAndPending(1, 0); + + channelPool.acquire(); + completePromise(acquirePromises, 1); + assertConnectionsCheckedOutAndPending(2, 0); + + channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 1); + + channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 2); + + Future f = channelPool.acquire(); + assertConnectionsCheckedOutAndPending(2, 2); + assertThat(f.isSuccess()).isFalse(); + 
assertThat(f.cause()).isInstanceOf(IllegalStateException.class); + + channelPool.release(acquirePromises.get(1).getNow()); + assertConnectionsCheckedOutAndPending(2, 2); + + completePromise(releasePromises, 0); + completePromise(acquirePromises, 2); + assertConnectionsCheckedOutAndPending(2, 1); + + channelPool.release(acquirePromises.get(2).getNow()); + completePromise(releasePromises, 1); + completePromise(acquirePromises, 3); + assertConnectionsCheckedOutAndPending(2, 0); + + channelPool.release(acquirePromises.get(0).getNow()); + completePromise(releasePromises, 2); + assertConnectionsCheckedOutAndPending(1, 0); + + channelPool.release(acquirePromises.get(3).getNow()); + completePromise(releasePromises, 3); + assertConnectionsCheckedOutAndPending(0, 0); + } + + private void completePromise(List> promises, int promiseIndex) throws Exception { + waitForPromise(promises, promiseIndex); + + MockChannel channel = new MockChannel(); + eventLoopGroup.next().register(channel); + promises.get(promiseIndex).setSuccess(channel); + } + + private void waitForPromise(List> promises, int promiseIndex) throws Exception { + while (promises.size() < promiseIndex + 1) { + Thread.sleep(1); + } + } + + private void assertConnectionsCheckedOutAndPending(int checkedOut, int pending) { + MetricCollector metricCollector = MetricCollector.create("foo"); + waitForCompletion(channelPool.collectChannelPoolMetrics(metricCollector)); + + MetricCollection metrics = metricCollector.collect(); + + assertThat(metrics.metricValues(HttpMetric.MAX_CONCURRENCY)).containsExactly(2); + assertThat(metrics.metricValues(HttpMetric.LEASED_CONCURRENCY)).containsExactly(checkedOut); + assertThat(metrics.metricValues(HttpMetric.PENDING_CONCURRENCY_ACQUIRES)).containsExactly(pending); + } + + private void waitForCompletion(CompletableFuture future) { + try { + future.get(5, TimeUnit.SECONDS); + } catch (ExecutionException e) { + return; + } catch (InterruptedException | TimeoutException e) { + throw new 
Error(e); + } + } +} \ No newline at end of file diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterUnrecoverableException.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java similarity index 53% rename from test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterUnrecoverableException.java rename to http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java index 41d6a190b51a..680057886174 100644 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterUnrecoverableException.java +++ b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/utils/NettyUtilsTest.java @@ -13,18 +13,18 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.core.waiters; +package software.amazon.awssdk.http.nio.netty.internal.utils; -public class WaiterUnrecoverableException extends RuntimeException { +import static org.assertj.core.api.Assertions.assertThat; - /** - * Constructs a new WaiterUnrecoverableException with the specified error - * message. - * - * @param message Describes the error encountered. 
- */ - public WaiterUnrecoverableException(String message) { - super(message); - } +import io.netty.util.AttributeKey; +import org.junit.Test; +public class NettyUtilsTest { + @Test + public void testGetOrCreateAttributeKey_calledTwiceWithSameName_returnsSameInstance() { + String attr = "NettyUtilsTest.Foo"; + AttributeKey fooAttr = NettyUtils.getOrCreateAttributeKey(attr); + assertThat(NettyUtils.getOrCreateAttributeKey(attr)).isSameAs(fooAttr); + } } diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 2baeafcd242f..1665e5c14f49 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index 4d2ef3d38d97..c45a1ec52ea2 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml new file mode 100644 index 000000000000..c4da83e835bc --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + metric-publishers + 2.13.56-SNAPSHOT + + + cloudwatch-metric-publisher + AWS Java SDK :: Metric Publishers :: CloudWatch + jar + ${awsjavasdk.version}-PREVIEW + + + ${project.parent.version} + 1.8 + + + + + software.amazon.awssdk + cloudwatch + ${awsjavasdk.version} + + + software.amazon.awssdk + annotations + ${awsjavasdk.version} + + + software.amazon.awssdk + sdk-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-core + ${awsjavasdk.version} + + + software.amazon.awssdk + http-client-spi + ${awsjavasdk.version} + + + diff --git 
a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java new file mode 100644 index 000000000000..143272e14c98 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisher.java @@ -0,0 +1,559 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.SdkPreviewApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task.AggregateMetricsTask; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task.UploadMetricsTasks; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import 
software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; +import software.amazon.awssdk.utils.ThreadFactoryBuilder; + +/** + * An implementation of {@link MetricPublisher} that aggregates and uploads metrics to Amazon CloudWatch on a periodic basis. + * + *

This simplifies the process of uploading custom metrics to CloudWatch, and can also be configured on the AWS + * SDK clients directly to upload AWS SDK-specific metrics (e.g. request latencies, failure rates) to CloudWatch. + * + *

Overview + * + *

This publisher aggregates metric data in memory, and periodically uploads it to CloudWatch in a background thread. This + * minimizes the work necessary to upload metrics, allowing the caller to focus on collecting the data. + * + *

The default settings of the metrics publisher are meant to minimize memory usage and CloudWatch cost, while still + * providing a useful amount of insight into the metric data. Care should be taken when overriding the default values on the + * publisher, because they can result in an associated increased in memory usage and CloudWatch cost. + * + *

By default, all metrics are uploaded using summary statistics. This means that only count, maximum, minimum, sum and + * average data is available in CloudWatch. Metric details (e.g. p90, p99) can be enabled on a per-metric basis using + * {@link Builder#detailedMetrics(Collection)}. + * + *

See {@link Builder} for the configuration values that are available for the publisher, and how they can be used to + * increase the functionality or decrease the cost the publisher. + * + *

Logging + * + * The CloudWatchMetricPublisher logs all aggregation and upload-related logs to the + * {@code software.amazon.awssdk.metrics.publishers.cloudwatch} namespace. To determine how many metrics are being uploaded + * successfully without checking the CloudWatch console, you can check for a "success" message at the DEBUG level. At the TRACE + * level, you can see exactly which metrics are being uploaded. + * + *

Configuring AWS SDK clients to upload client metrics + * + * TODO + * + *

Uploading your own custom metrics + * + * Step 1: Define which metrics you wish to collect + * + *

Metrics are described using the {@link SdkMetric#create} method. When you describe your metric, you specify + * the name that will appear in CloudWatch and the Java data-type of the metric. The metric should be described once for your + * entire application. + * + *

Supported types: (1) {@link Number} types (e.g. {@link Integer}, {@link Double}, etc.), (2) {@link Duration}. + * + *

+ *     // In this and the following examples, we want to collect metrics about calls to a method we have defined: "myMethod"
+ *     public static final class MyMethodMetrics {
+ *         // The number of times "myMethod" has been called.
+ *         private static final SdkMetric<Integer> MY_METHOD_CALL_COUNT =
+ *                 SdkMetric.create("MyMethodCallCount", Integer.class, MetricLevel.INFO, MetricCategory.CUSTOM);
+ *
+ *         // The amount of time that "myMethod" took to execute.
+ *         private static final SdkMetric<Duration> MY_METHOD_LATENCY =
+ *                 SdkMetric.create("MyMethodLatency", Duration.class, MetricLevel.INFO, MetricCategory.CUSTOM);
+ *     }
+ * 
+ * + *

Step 2: Create a {@code CloudWatchMetricPublisher} + * + *

A {@code CloudWatchMetricPublisher} should be created once for your entire application, and be reused wherever it is + * needed. {@code CloudWatchMetricPublisher}s are thread-safe, so there should be no need to create multiple instances. Most + * people create and manage the publisher in their inversion-of-control (IoC) container (e.g. Spring/Dagger/Guice). + * + *

Note: When your application is finished with the {@code CloudWatchMetricPublisher}, make sure to {@link #close()} it. Your + * inversion-of-control container may handle this for you on JVM shutdown. + * + *

See {@link CloudWatchMetricPublisher.Builder} for all available configuration options. + * + *

+ *     // Create a CloudWatchMetricPublisher using a custom namespace.
+ *     MetricPublisher metricPublisher = CloudWatchMetricPublisher.builder()
+ *                                                                .namespace("MyApplication")
+ *                                                                .build();
+ * 
+ * + *

Step 3: Collect and Publish Metrics + * + *

Create and use a {@link MetricCollector} to collect data about your configured metrics. + * + *

+ *     // Call "myMethod" and collect metrics about the call.
+ *     Instant methodCallStartTime = Instant.now();
+ *     myMethod();
+ *     Duration methodCallDuration = Duration.between(methodCallStartTime, Instant.now());
+ *
+ *     // Write the metrics to the CloudWatchMetricPublisher.
+ *     MetricCollector metricCollector = MetricCollector.create("MyMethodCall");
+ *     metricCollector.reportMetric(MyCustomMetrics.MY_METHOD_CALL_COUNT, 1);
+ *     metricCollector.reportMetric(MyCustomMetrics.MY_METHOD_LATENCY, methodCallDuration);
+ *     MetricCollection metricCollection = metricCollector.collect();
+ *
+ *     metricPublisher.publish(metricCollection);
+ * 
+ * + *

Warning: Make sure the {@link #close()} this publisher when it is done being used to release all resources it + * consumes. Failure to do so will result in possible thread or file descriptor leaks. + * + * NOTE: This is a Preview API and is subject to change so it should not be used in production. + */ +@SdkPreviewApi +@ThreadSafe +@Immutable +@SdkPublicApi +public final class CloudWatchMetricPublisher implements MetricPublisher { + /** + * The maximum queue size for the internal {@link #executor} that is used to aggregate metric data and upload it to + * CloudWatch. If this value is too high, memory is wasted. If this value is too low, metrics could be dropped. + * + * This value is not currently configurable, because it's unlikely that this is a value that customers should need to modify. + * If customers really need control over this value, we might consider letting them instead configure the + * {@link BlockingQueue} used on the executor. The value here depends on the type of {@code BlockingQueue} in use, and + * we should probably not indirectly couple people to the type of blocking queue we're using. + */ + private static final int MAXIMUM_TASK_QUEUE_SIZE = 128; + + private static final String DEFAULT_NAMESPACE = "AwsSdk/JavaSdk2"; + private static final int DEFAULT_MAXIMUM_CALLS_PER_UPLOAD = 10; + private static final Duration DEFAULT_UPLOAD_FREQUENCY = Duration.ofMinutes(1); + private static final Set> DEFAULT_DIMENSIONS = Stream.of(CoreMetric.SERVICE_ID, + CoreMetric.OPERATION_NAME) + .collect(Collectors.toSet()); + private static final Set DEFAULT_METRIC_CATEGORIES = Collections.singleton(MetricCategory.ALL); + private static final MetricLevel DEFAULT_METRIC_LEVEL = MetricLevel.INFO; + private static final Set> DEFAULT_DETAILED_METRICS = Collections.emptySet(); + + /** + * Whether {@link #close()} should call {@link CloudWatchAsyncClient#close()}. 
This is false when + * {@link Builder#cloudWatchClient(CloudWatchAsyncClient)} was specified, meaning the customer has to close the client + * themselves. + */ + private final boolean closeClientWithPublisher; + + /** + * The aggregator that takes {@link MetricCollection}s and converts them into {@link PutMetricDataRequest}s. This aggregator + * is *not* thread safe, so it should only ever be accessed from the {@link #executor}'s thread. + */ + private final MetricCollectionAggregator metricAggregator; + + /** + * The uploader that takes {@link PutMetricDataRequest}s and sends them to a {@link CloudWatchAsyncClient}. + */ + private final MetricUploader metricUploader; + + /** + * The executor that executes {@link AggregateMetricsTask}s and {@link UploadMetricsTasks}s. + */ + private final ExecutorService executor; + + /** + * A scheduled executor that periodically schedules a {@link UploadMetricsTasks} on the {@link #executor} thread. Note: this + * executor should never execute the flush task itself, because that needs access to the {@link #metricAggregator}, and the + * {@code metricAggregator} should only ever be accessed from the {@link #executor} thread. + */ + private final ScheduledExecutorService scheduledExecutor; + + /** + * The maximum number of {@link PutMetricDataRequest}s that should ever be executed as part of a single + * {@link UploadMetricsTasks}. 
+ */ + private final int maximumCallsPerUpload; + + private CloudWatchMetricPublisher(Builder builder) { + this.closeClientWithPublisher = resolveCloseClientWithPublisher(builder); + this.metricAggregator = new MetricCollectionAggregator(resolveNamespace(builder), + resolveDimensions(builder), + resolveMetricCategories(builder), + resolveMetricLevel(builder), + resolveDetailedMetrics(builder)); + this.metricUploader = new MetricUploader(resolveClient(builder)); + this.maximumCallsPerUpload = resolveMaximumCallsPerUpload(builder); + + ThreadFactory threadFactory = new ThreadFactoryBuilder().threadNamePrefix("cloud-watch-metric-publisher").build(); + this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory); + + // Do not increase above 1 thread: access to MetricCollectionAggregator is not thread safe. + this.executor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, + new ArrayBlockingQueue<>(MAXIMUM_TASK_QUEUE_SIZE), + threadFactory); + + long flushFrequencyInMillis = resolveUploadFrequency(builder).toMillis(); + this.scheduledExecutor.scheduleAtFixedRate(this::flushMetrics, + flushFrequencyInMillis, flushFrequencyInMillis, TimeUnit.MILLISECONDS); + } + + private Set resolveMetricCategories(Builder builder) { + return builder.metricCategories == null ? DEFAULT_METRIC_CATEGORIES : new HashSet<>(builder.metricCategories); + } + + private MetricLevel resolveMetricLevel(Builder builder) { + return builder.metricLevel == null ? DEFAULT_METRIC_LEVEL : builder.metricLevel; + } + + private Set> resolveDetailedMetrics(Builder builder) { + return builder.detailedMetrics == null ? DEFAULT_DETAILED_METRICS : new HashSet<>(builder.detailedMetrics); + } + + private Set> resolveDimensions(Builder builder) { + return builder.dimensions == null ? 
DEFAULT_DIMENSIONS : new HashSet<>(builder.dimensions); + } + + private boolean resolveCloseClientWithPublisher(Builder builder) { + return builder.client == null; + } + + private CloudWatchAsyncClient resolveClient(Builder builder) { + return builder.client == null ? CloudWatchAsyncClient.create() : builder.client; + } + + private Duration resolveUploadFrequency(Builder builder) { + return builder.uploadFrequency == null ? DEFAULT_UPLOAD_FREQUENCY : builder.uploadFrequency; + } + + private String resolveNamespace(Builder builder) { + return builder.namespace == null ? DEFAULT_NAMESPACE : builder.namespace; + } + + private int resolveMaximumCallsPerUpload(Builder builder) { + return builder.maximumCallsPerUpload == null ? DEFAULT_MAXIMUM_CALLS_PER_UPLOAD : builder.maximumCallsPerUpload; + } + + @Override + public void publish(MetricCollection metricCollection) { + try { + executor.submit(new AggregateMetricsTask(metricAggregator, metricCollection)); + } catch (RejectedExecutionException e) { + METRIC_LOGGER.warn(() -> "Some AWS SDK client-side metrics have been dropped because an internal executor did not " + + "accept them. This usually occurs because your publisher has been shut down or you have " + + "generated too many requests for the publisher to handle in a timely fashion.", e); + } + } + + /** + * Flush the metrics (via a {@link UploadMetricsTasks}). In the event that the {@link #executor} task queue is full, this + * this will retry automatically. 
+ */ + private void flushMetrics() { + while (!scheduledExecutor.isShutdown() && + !executor.isShutdown() && + !Thread.currentThread().isInterrupted()) { + try { + executor.submit(new UploadMetricsTasks(metricAggregator, metricUploader, maximumCallsPerUpload)); + break; + } catch (RejectedExecutionException e) { + sleepQuietly(100); + } + } + } + + private void sleepQuietly(int duration) { + try { + Thread.sleep(duration); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + @Override + public void close() { + flushMetrics(); + + scheduledExecutor.shutdownNow(); + executor.shutdown(); + try { + if (!executor.awaitTermination(60, TimeUnit.SECONDS)) { + executor.shutdownNow(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + executor.shutdownNow(); + } + + metricUploader.close(closeClientWithPublisher); + } + + /** + * Returns {@code true} when the internal executor has been shutdown. + */ + public boolean isShutdown() { + return executor.isShutdown() && scheduledExecutor.isShutdown(); + } + + /** + * Create a new {@link Builder} that can be used to create {@link CloudWatchMetricPublisher}s. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Create a {@link CloudWatchMetricPublisher} using all default values. + */ + public static CloudWatchMetricPublisher create() { + return builder().build(); + } + + /** + * Builder class to construct {@link CloudWatchMetricPublisher} instances. See the individual properties for which + * configuration settings are available. 
+ */ + public static final class Builder { + private CloudWatchAsyncClient client; + private Duration uploadFrequency; + private String namespace; + private Integer maximumCallsPerUpload; + private Collection> dimensions; + private Collection metricCategories; + private MetricLevel metricLevel; + private Collection> detailedMetrics; + + private Builder() { + } + + /** + * Configure the {@link PutMetricDataRequest#namespace()} used for all put-metric-data calls from this publisher. + * + *

If this is not specified, {@code AwsSdk/JavaSdk2} will be used. + */ + public Builder namespace(String namespace) { + this.namespace = namespace; + return this; + } + + /** + * Configure the {@link CloudWatchAsyncClient} instance that should be used to communicate with CloudWatch. + * + *

If this is not specified, the {@code CloudWatchAsyncClient} will be created via + * {@link CloudWatchAsyncClient#create()} (and will be closed when {@link #close()} is invoked). + * + *

If you specify a {@code CloudWatchAsyncClient} via this method, it will not be closed when this publisher + * is closed. You will need to need to manage the lifecycle of the client yourself. + */ + public Builder cloudWatchClient(CloudWatchAsyncClient client) { + this.client = client; + return this; + } + + /** + * Configure the frequency at which aggregated metrics are uploaded to CloudWatch and released from memory. + * + *

If this is not specified, metrics will be uploaded once per minute. + * + *

Smaller values will: (1) reduce the amount of memory used by the library (particularly when + * {@link #detailedMetrics(Collection)} are enabled), (2) increase the number of CloudWatch calls (and therefore + * increase CloudWatch usage cost). + * + *

Larger values will: (1) increase the amount of memory used by the library (particularly when + * {@code detailedMetrics} are enabled), (2) increase the time it takes for metric data to appear in + * CloudWatch, (3) reduce the number of CloudWatch calls (and therefore decrease CloudWatch usage cost). + * + *

Warning: When {@code detailedMetrics} are enabled, all unique metric values are stored in memory until they + * can be published to CloudWatch. A high {@code uploadFrequency} with multiple {@code detailedMetrics} enabled can + * quickly consume heap memory while the values wait to be published to CloudWatch. In memory constrained environments, it + * is recommended to minimize the number of {@code detailedMetrics} configured on the publisher, or to upload metric data + * more frequently. As with all performance and resource concerns, profiling in a production-like environment is + * encouraged. + */ + public Builder uploadFrequency(Duration uploadFrequency) { + this.uploadFrequency = uploadFrequency; + return this; + } + + /** + * Configure the maximum number of {@link CloudWatchAsyncClient#putMetricData(PutMetricDataRequest)} calls that an + * individual "upload" event can make to CloudWatch. Any metrics that would exceed this limit are dropped during the + * upload, logging a warning on the {@code software.amazon.awssdk.metrics.publishers.cloudwatch} namespace. + * + *

The SDK will always attempt to maximize the number of metrics per put-metric-data call, but uploads will be split + * into multiple put-metric-data calls if they include a lot of different metrics or if there are a lot of high-value- + * distribution {@link #detailedMetrics(Collection)} being monitored. + * + *

This value combined with the {@link #uploadFrequency(Duration)} effectively provide a "hard cap" on the number of + * put-metric-data calls, to prevent unbounded cost in the event that too many metrics are enabled by the user. + * + *

If this is not specified, put-metric-data calls will be capped at 10 per upload. + */ + public Builder maximumCallsPerUpload(Integer maximumCallsPerUpload) { + this.maximumCallsPerUpload = maximumCallsPerUpload; + return this; + } + + /** + * Configure the {@link SdkMetric}s that are used to define the {@link Dimension}s metrics are aggregated under. + * + *

If this is not specified, {@link CoreMetric#SERVICE_ID} and {@link CoreMetric#OPERATION_NAME} are used, allowing + * you to compare metrics for different services and operations. + * + *

Warning: Configuring the dimensions incorrectly can result in a large increase in the number of unique + * metrics and put-metric-data calls to cloudwatch, which have an associated monetary cost. Be sure you're choosing your + * metric dimensions wisely, and that you always evaluate the cost of modifying these values on your monthly usage costs. + * + *

Example useful settings: + *

    + *
  • {@code CoreMetric.SERVICE_ID} and {@code CoreMetric.OPERATION_NAME} (default): Separate metrics by service and + * operation, so that you can compare latencies between AWS services and operations.
  • + *
  • {@code CoreMetric.SERVICE_ID}, {@code CoreMetric.OPERATION_NAME} and {@code CoreMetric.HOST_NAME}: Separate + * metrics by service, operation and host so that you can compare latencies across hosts in your fleet. Note: This should + * only be used when your fleet is relatively small. Large fleets result in a large number of unique metrics being + * generated.
  • + *
  • {@code CoreMetric.SERVICE_ID}, {@code CoreMetric.OPERATION_NAME} and {@code HttpMetric.HTTP_CLIENT_NAME}: Separate + * metrics by service, operation and HTTP client type so that you can compare latencies between different HTTP client + * implementations.
  • + *
+ */ + public Builder dimensions(Collection> dimensions) { + this.dimensions = new ArrayList<>(dimensions); + return this; + } + + /** + * @see #dimensions(SdkMetric[]) + */ + @SafeVarargs + public final Builder dimensions(SdkMetric... dimensions) { + return dimensions(Arrays.asList(dimensions)); + } + + /** + * Configure the {@link MetricCategory}s that should be uploaded to CloudWatch. + * + *

If this is not specified, {@link MetricCategory#ALL} is used. + * + *

All {@link SdkMetric}s are associated with at least one {@code MetricCategory}. This setting determines which + * category of metrics uploaded to CloudWatch. Any metrics {@link #publish(MetricCollection)}ed that do not fall under + * these configured categories are ignored. + * + *

Note: If there are {@link #dimensions(Collection)} configured that do not fall under these {@code MetricCategory} + * values, the dimensions will NOT be ignored. In other words, the metric category configuration only affects which + * metrics are uploaded to CloudWatch, not which values can be used for {@code dimensions}. + */ + public Builder metricCategories(Collection metricCategories) { + this.metricCategories = new ArrayList<>(metricCategories); + return this; + } + + /** + * @see #metricCategories(Collection) + */ + public Builder metricCategories(MetricCategory... metricCategories) { + return metricCategories(Arrays.asList(metricCategories)); + } + + /** + * Configure the {@link MetricLevel} that should be uploaded to CloudWatch. + * + *

If this is not specified, {@link MetricLevel#INFO} is used. + * + *

All {@link SdkMetric}s are associated with one {@code MetricLevel}. This setting determines which level of metrics + * uploaded to CloudWatch. Any metrics {@link #publish(MetricCollection)}ed that do not fall under these configured + * categories are ignored. + * + *

Note: If there are {@link #dimensions(Collection)} configured that do not fall under this {@code MetricLevel} + * values, the dimensions will NOT be ignored. In other words, the metric category configuration only affects which + * metrics are uploaded to CloudWatch, not which values can be used for {@code dimensions}. + */ + public Builder metricLevel(MetricLevel metricLevel) { + this.metricLevel = metricLevel; + return this; + } + + /** + * Configure the set of metrics for which detailed values and counts are uploaded to CloudWatch, instead of summaries. + * + *

By default, all metrics published to this publisher are summarized using {@link StatisticSet}s. This saves memory, + * because it allows the publisher to store a fixed amount of information in memory, no matter how many different metric + * values are published. The drawback is that metrics other than count, sum, average, maximum and minimum are not made + * available in CloudWatch. The {@code detailedMetrics} setting instructs the publisher to store and publish itemized + * {@link MetricDatum#values()} and {@link MetricDatum#counts()}, which enables other metrics like p90 and p99 to be + * queried in CloudWatch. + * + *

Warning: When {@code detailedMetrics} are enabled, all unique metric values are stored in memory until they + * can be published to CloudWatch. A high {@code uploadFrequency} with multiple {@code detailedMetrics} enabled can + * quickly consume heap memory while the values wait to be published to CloudWatch. In memory constrained environments, it + * is recommended to minimize the number of {@code detailedMetrics} configured on the publisher, or to upload metric data + * more frequently. As with all performance and resource concerns, profiling in a production-like environment is + * encouraged. + * + *

In addition to additional heap memory usage, detailed metrics can result in more requests being sent to CloudWatch, + * which can also introduce additional usage cost. The {@link #maximumCallsPerUpload(Integer)} acts as a safeguard against + * too many calls being made, but if you configure multiple {@code detailedMetrics}, you may need to increase the + * {@code maximumCallsPerUpload} limit. + */ + public Builder detailedMetrics(Collection> detailedMetrics) { + this.detailedMetrics = new ArrayList<>(detailedMetrics); + return this; + } + + /** + * @see #detailedMetrics(Collection) + */ + public Builder detailedMetrics(SdkMetric... detailedMetrics) { + return detailedMetrics(Arrays.asList(detailedMetrics)); + } + + /** + * Build a {@link CloudWatchMetricPublisher} using the configuration currently configured on this publisher. + */ + public CloudWatchMetricPublisher build() { + return new CloudWatchMetricPublisher(this); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java new file mode 100644 index 000000000000..e161df64cb60 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/CloudWatchMetricLogger.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Logger; + +/** + * A holder for {@link #METRIC_LOGGER}. + */ +@SdkInternalApi +public class CloudWatchMetricLogger { + /** + * The logger via which all cloudwatch-metric-publisher logs are written. This allows customers to easily enable/disable logs + * written from this module. + */ + public static final Logger METRIC_LOGGER = Logger.loggerFor("software.amazon.awssdk.metrics.publishers.cloudwatch"); + + private CloudWatchMetricLogger() { + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java new file mode 100644 index 000000000000..b77398989fa3 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploader.java @@ -0,0 +1,71 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; + +/** + * Uploads {@link PutMetricDataRequest}s to a {@link CloudWatchAsyncClient}, logging whether it was successful or a failure to + * the {@link CloudWatchMetricLogger#METRIC_LOGGER}. + */ +@SdkInternalApi +public class MetricUploader { + private final CloudWatchAsyncClient cloudWatchClient; + + public MetricUploader(CloudWatchAsyncClient cloudWatchClient) { + this.cloudWatchClient = cloudWatchClient; + } + + /** + * Upload the provided list of requests to CloudWatch, completing the returned future when the uploads complete. Note: This + * will log a message if one of the provided requests fails. 
+ */ + public CompletableFuture upload(List requests) { + CompletableFuture[] publishResults = startCalls(requests); + return CompletableFuture.allOf(publishResults).whenComplete((r, t) -> { + int numRequests = publishResults.length; + if (t != null) { + METRIC_LOGGER.warn(() -> "Failed while publishing some or all AWS SDK client-side metrics to CloudWatch.", t); + } else { + METRIC_LOGGER.debug(() -> "Successfully published " + numRequests + + " AWS SDK client-side metric requests to CloudWatch."); + } + }); + } + + private CompletableFuture[] startCalls(List requests) { + return requests.stream() + .peek(this::logRequest) + .map(cloudWatchClient::putMetricData) + .toArray(CompletableFuture[]::new); + } + + private void logRequest(PutMetricDataRequest putMetricDataRequest) { + METRIC_LOGGER.trace(() -> "Sending request to CloudWatch: " + putMetricDataRequest); + } + + public void close(boolean closeClient) { + if (closeClient) { + this.cloudWatchClient.close(); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java new file mode 100644 index 000000000000..f7c997795efb --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/AggregateMetricsTask.java @@ -0,0 +1,42 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; + +/** + * A task that is executed on the {@link CloudWatchMetricPublisher}'s executor to add a {@link MetricCollection} to a + * {@link MetricCollectionAggregator}. + */ +@SdkInternalApi +public class AggregateMetricsTask implements Runnable { + private final MetricCollectionAggregator collectionAggregator; + private final MetricCollection metricCollection; + + public AggregateMetricsTask(MetricCollectionAggregator collectionAggregator, + MetricCollection metricCollection) { + this.collectionAggregator = collectionAggregator; + this.metricCollection = metricCollection; + } + + @Override + public void run() { + collectionAggregator.addCollection(metricCollection); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java new file mode 100644 index 000000000000..71808609c5b4 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasks.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import static software.amazon.awssdk.metrics.publishers.cloudwatch.internal.CloudWatchMetricLogger.METRIC_LOGGER; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; + +/** + * A task that is executed on the {@link CloudWatchMetricPublisher}'s executor to collect requests from a + * {@link MetricCollectionAggregator} and write them to a {@link MetricUploader}. 
+ */ +@SdkInternalApi +public class UploadMetricsTasks implements Runnable { + private final MetricCollectionAggregator collectionAggregator; + private final MetricUploader uploader; + private int maximumRequestsPerFlush; + + public UploadMetricsTasks(MetricCollectionAggregator collectionAggregator, + MetricUploader uploader, + int maximumRequestsPerFlush) { + this.collectionAggregator = collectionAggregator; + this.uploader = uploader; + this.maximumRequestsPerFlush = maximumRequestsPerFlush; + } + + @Override + public void run() { + List allRequests = collectionAggregator.getRequests(); + List requests = allRequests; + if (requests.size() > maximumRequestsPerFlush) { + METRIC_LOGGER.warn(() -> "Maximum AWS SDK client-side metric call count exceeded: " + allRequests.size() + + " > " + maximumRequestsPerFlush + ". Some metric requests will be dropped. This occurs when " + + "the caller has configured too many metrics or too unique of dimensions without an " + + "associated increase in the maximum-calls-per-upload configured on the publisher."); + requests = requests.subList(0, maximumRequestsPerFlush); + } + + uploader.upload(requests); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java new file mode 100644 index 000000000000..4ec56a053750 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/DetailedMetricAggregator.java @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * An implementation of {@link MetricAggregator} that stores all values and counts for a given metric/dimension pair + * until they can be added to a {@link MetricDatum}. 
+ */ +@SdkInternalApi +class DetailedMetricAggregator implements MetricAggregator { + private final SdkMetric metric; + private final List dimensions; + private final StandardUnit unit; + + private final Map metricDetails = new HashMap<>(); + + DetailedMetricAggregator(MetricAggregatorKey key, StandardUnit unit) { + this.metric = key.metric(); + this.dimensions = key.dimensions(); + this.unit = unit; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public List dimensions() { + return dimensions; + } + + @Override + public void addMetricValue(double value) { + metricDetails.computeIfAbsent(value, v -> new DetailedMetrics(value)).metricCount++; + } + + @Override + public StandardUnit unit() { + return unit; + } + + public Collection detailedMetrics() { + return Collections.unmodifiableCollection(metricDetails.values()); + } + + public static class DetailedMetrics { + private final double metricValue; + private int metricCount = 0; + + private DetailedMetrics(double metricValue) { + this.metricValue = metricValue; + } + + public double metricValue() { + return metricValue; + } + + public int metricCount() { + return metricCount; + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java new file mode 100644 index 000000000000..9f088ad25c57 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregator.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.Collection; +import java.util.List; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.CloudWatchMetricPublisher; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * Used by {@link MetricCollectionAggregator} to aggregate metrics in memory until they are ready to be added to a + * {@link MetricDatum}. + * + *

This is either a {@link SummaryMetricAggregator} or a {@link DetailedMetricAggregator}, depending on the configured + * {@link CloudWatchMetricPublisher.Builder#detailedMetrics(Collection)} setting. + */ +@SdkInternalApi +interface MetricAggregator { + /** + * The metric that this aggregator is aggregating. For example, this may be aggregating {@link CoreMetric#API_CALL_DURATION} + * metric values. There may be multiple aggregators for a single type of metric, when their {@link #dimensions()} differ. + */ + SdkMetric metric(); + + /** + * The dimensions associated with the metric values that this aggregator is aggregating. For example, this may be aggregating + * "S3's putObject" metrics or "DynamoDb's listTables" metrics. The exact metric being aggregated is available via + * {@link #metric()}. + */ + List dimensions(); + + /** + * Get the unit of the {@link #metric()} when it is published to CloudWatch. + */ + StandardUnit unit(); + + /** + * Add the provided metric value to this aggregator. + */ + void addMetricValue(double value); + + /** + * Execute the provided consumer if this {@code MetricAggregator} is a {@link SummaryMetricAggregator}. + */ + default void ifSummary(Consumer summaryConsumer) { + if (this instanceof SummaryMetricAggregator) { + summaryConsumer.accept((SummaryMetricAggregator) this); + } + } + + /** + * Execute the provided consumer if this {@code MetricAggregator} is a {@link DetailedMetricAggregator}. 
+ */ + default void ifDetailed(Consumer detailsConsumer) { + if (this instanceof DetailedMetricAggregator) { + detailsConsumer.accept((DetailedMetricAggregator) this); + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java new file mode 100644 index 000000000000..5c07b7744065 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricAggregatorKey.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; + +/** + * A pairing of {@link SdkMetric} and {@link Dimension}s that can be used as a key in a map. This uniquely identifies a specific + * {@link MetricAggregator}. 
+ */ +@SdkInternalApi +class MetricAggregatorKey { + private final SdkMetric metric; + private final List dimensions; + + MetricAggregatorKey(SdkMetric metric, List dimensions) { + this.metric = metric; + this.dimensions = dimensions; + } + + public final SdkMetric metric() { + return this.metric; + } + + public final List dimensions() { + return this.dimensions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MetricAggregatorKey that = (MetricAggregatorKey) o; + + if (!metric.equals(that.metric)) { + return false; + } + return dimensions.equals(that.dimensions); + } + + @Override + public int hashCode() { + int result = metric.hashCode(); + result = 31 * result + dimensions.hashCode(); + return result; + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java new file mode 100644 index 000000000000..9a00b2d8fa04 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregator.java @@ -0,0 +1,215 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.DetailedMetricAggregator.DetailedMetrics; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; + +/** + * Aggregates {@link MetricCollection}s by: (1) the minute in which they occurred, and (2) the dimensions in the collection + * associated with that metric. Allows retrieving the aggregated values as a list of {@link PutMetricDataRequest}s. + * + *

It would be too expensive to upload every {@code MetricCollection} as a unique {@code PutMetricDataRequest}, so this + * class aggregates the data so that multiple {@code MetricCollection}s can be placed in the same {@code PutMetricDataRequest}. + * + *

Warning: This class is *not* thread-safe. + */ +@SdkInternalApi +@NotThreadSafe +public class MetricCollectionAggregator { + /** + * The maximum number of {@link MetricDatum}s allowed in {@link PutMetricDataRequest#metricData()}. This limit is imposed by + * CloudWatch. + */ + public static final int MAX_METRIC_DATA_PER_REQUEST = 20; + + /** + * The maximum number of unique {@link MetricDatum#values()} allowed in a single {@link PutMetricDataRequest}. This limit is + * not imposed directly by CloudWatch, but they do impose a 40KB limit for a single request. This value was determined by + * trial-and-error to roughly equate to a 40KB limit when we are also at the {@link #MAX_METRIC_DATA_PER_REQUEST}. + */ + public static final int MAX_VALUES_PER_REQUEST = 300; + + /** + * The API name to include in the user agent for all {@link PutMetricDataRequest}s generated by this aggregator. + */ + private static final ApiName API_NAME = ApiName.builder().name("hll").version("cw-mp").build(); + + /** + * The {@link PutMetricDataRequest#namespace()} that should be used for all {@link PutMetricDataRequest}s returned from + * {@link #getRequests()}. + */ + private final String namespace; + + /** + * The {@link TimeBucketedMetrics} that actually performs the data aggregation whenever + * {@link #addCollection(MetricCollection)} is called. + */ + private final TimeBucketedMetrics timeBucketedMetrics; + + public MetricCollectionAggregator(String namespace, + Set> dimensions, + Set metricCategories, + MetricLevel metricLevel, + Set> detailedMetrics) { + this.namespace = namespace; + this.timeBucketedMetrics = new TimeBucketedMetrics(dimensions, metricCategories, metricLevel, detailedMetrics); + } + + /** + * Add a collection to this aggregator. 
+ */ + public void addCollection(MetricCollection collection) { + timeBucketedMetrics.addMetrics(collection); + } + + /** + * Get all {@link PutMetricDataRequest}s that can be generated from the data that was added via + * {@link #addCollection(MetricCollection)}. This method resets the state of this {@code MetricCollectionAggregator}. + */ + public List getRequests() { + List requests = new ArrayList<>(); + + List requestMetricDatums = new ArrayList<>(); + ValuesInRequestCounter valuesInRequestCounter = new ValuesInRequestCounter(); + + Map> metrics = timeBucketedMetrics.timeBucketedMetrics(); + + for (Map.Entry> entry : metrics.entrySet()) { + Instant timeBucket = entry.getKey(); + for (MetricAggregator metric : entry.getValue()) { + if (requestMetricDatums.size() >= MAX_METRIC_DATA_PER_REQUEST) { + requests.add(newPutRequest(requestMetricDatums)); + requestMetricDatums.clear(); + } + + metric.ifSummary(summaryAggregator -> requestMetricDatums.add(summaryMetricDatum(timeBucket, summaryAggregator))); + + metric.ifDetailed(detailedAggregator -> { + int startIndex = 0; + Collection detailedMetrics = detailedAggregator.detailedMetrics(); + + while (startIndex < detailedMetrics.size()) { + if (valuesInRequestCounter.get() >= MAX_VALUES_PER_REQUEST) { + requests.add(newPutRequest(requestMetricDatums)); + requestMetricDatums.clear(); + valuesInRequestCounter.reset(); + } + + MetricDatum data = detailedMetricDatum(timeBucket, detailedAggregator, + startIndex, MAX_VALUES_PER_REQUEST - valuesInRequestCounter.get()); + int valuesAdded = data.values().size(); + startIndex += valuesAdded; + valuesInRequestCounter.add(valuesAdded); + requestMetricDatums.add(data); + } + }); + } + } + + if (!requestMetricDatums.isEmpty()) { + requests.add(newPutRequest(requestMetricDatums)); + } + + timeBucketedMetrics.reset(); + + return requests; + } + + private MetricDatum detailedMetricDatum(Instant timeBucket, + DetailedMetricAggregator metric, + int metricStartIndex, + int maxElements) { 
+ List values = new ArrayList<>(); + List counts = new ArrayList<>(); + + Stream boundedMetrics = metric.detailedMetrics() + .stream() + .skip(metricStartIndex) + .limit(maxElements); + + boundedMetrics.forEach(detailedMetrics -> { + values.add(MetricValueNormalizer.normalize(detailedMetrics.metricValue())); + counts.add((double) detailedMetrics.metricCount()); + }); + + return MetricDatum.builder() + .timestamp(timeBucket) + .metricName(metric.metric().name()) + .dimensions(metric.dimensions()) + .unit(metric.unit()) + .values(values) + .counts(counts) + .build(); + } + + private MetricDatum summaryMetricDatum(Instant timeBucket, + SummaryMetricAggregator metric) { + StatisticSet stats = StatisticSet.builder() + .minimum(MetricValueNormalizer.normalize(metric.min())) + .maximum(MetricValueNormalizer.normalize(metric.max())) + .sum(MetricValueNormalizer.normalize(metric.sum())) + .sampleCount((double) metric.count()) + .build(); + return MetricDatum.builder() + .timestamp(timeBucket) + .metricName(metric.metric().name()) + .dimensions(metric.dimensions()) + .unit(metric.unit()) + .statisticValues(stats) + .build(); + } + + private PutMetricDataRequest newPutRequest(List metricData) { + return PutMetricDataRequest.builder() + .overrideConfiguration(r -> r.addApiName(API_NAME)) + .namespace(namespace) + .metricData(metricData) + .build(); + } + + private static class ValuesInRequestCounter { + private int valuesInRequest; + + private void add(int i) { + valuesInRequest += i; + } + + private int get() { + return valuesInRequest; + } + + private void reset() { + valuesInRequest = 0; + } + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java new file mode 100644 index 
000000000000..2767c39379a9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricValueNormalizer.java @@ -0,0 +1,45 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +class MetricValueNormalizer { + /** + * Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we floor + * those values to 0 to prevent that error. + */ + private static final double ZERO_THRESHOLD = 0.0001; + + private MetricValueNormalizer() { + } + + /** + * Normalizes a metric value so that it won't upset CloudWatch when it is uploaded. 
+ */ + public static double normalize(double value) { + if (value > ZERO_THRESHOLD) { + return value; + } + + if (value < -ZERO_THRESHOLD) { + return value; + } + + return 0; + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java new file mode 100644 index 000000000000..3da4b87ed8f0 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/SummaryMetricAggregator.java @@ -0,0 +1,84 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * An implementation of {@link MetricAggregator} that stores summary statistics for a given metric/dimension pair until the + * summary can be added to a {@link MetricDatum}. 
+ */ +@SdkInternalApi +class SummaryMetricAggregator implements MetricAggregator { + private final SdkMetric metric; + private final List dimensions; + private final StandardUnit unit; + + private double min = Double.MAX_VALUE; + private double max = Double.MIN_VALUE; + private double sum = 0; + private int count = 0; + + SummaryMetricAggregator(MetricAggregatorKey key, StandardUnit unit) { + this.metric = key.metric(); + this.dimensions = key.dimensions(); + this.unit = unit; + } + + @Override + public SdkMetric metric() { + return metric; + } + + @Override + public List dimensions() { + return dimensions; + } + + @Override + public void addMetricValue(double value) { + min = Double.min(value, min); + max = Double.max(value, max); + sum += value; + ++count; + } + + @Override + public StandardUnit unit() { + return unit; + } + + public double min() { + return min; + } + + public double max() { + return max; + } + + public double sum() { + return sum; + } + + public int count() { + return count; + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java new file mode 100644 index 000000000000..949f16a01504 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/TimeBucketedMetrics.java @@ -0,0 +1,226 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import static java.time.temporal.ChronoUnit.MINUTES; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; + +/** + * "Buckets" metrics by the minute in which they were collected. This allows all metric data for a given 1-minute period to be + * aggregated under a specific {@link MetricAggregator}. + */ +@SdkInternalApi +class TimeBucketedMetrics { + /** + * A map from "the minute during which a metric value happened" to "the dimension and metric associated with the metric + * values" to "the aggregator for the metric values that occurred within that minute and for that dimension/metric". + */ + private final Map> timeBucketedMetrics = new HashMap<>(); + + /** + * The dimensions that should be used for aggregating metrics that occur within a given minute. These are optional values. 
+ * The dimensions will be used if a {@link MetricCollection} includes them, but if it does not, it will be aggregated with + * whatever dimensions (if any) are available. + */ + private final Set> dimensions; + + /** + * The set of metrics for which {@link DetailedMetricAggregator}s should be used for aggregation. All other metrics will use + * a {@link SummaryMetricAggregator}. + */ + private final Set> detailedMetrics; + + /** + * The metric categories for which we should aggregate values. Any categories outside of this set will have their values + * ignored/dropped. + */ + private final Set metricCategories; + + /** + * The metric levels for which we should aggregate values. Any categories at a more "verbose" level than this one will have + * their values ignored/dropped. + */ + private final MetricLevel metricLevel; + + /** + * True, when the {@link #metricCategories} contains {@link MetricCategory#ALL}. + */ + private final boolean metricCategoriesContainsAll; + + + + TimeBucketedMetrics(Set> dimensions, + Set metricCategories, + MetricLevel metricLevel, + Set> detailedMetrics) { + this.dimensions = dimensions; + this.detailedMetrics = detailedMetrics; + this.metricCategories = metricCategories; + this.metricLevel = metricLevel; + this.metricCategoriesContainsAll = metricCategories.contains(MetricCategory.ALL); + } + + /** + * Add the provided collection to the proper bucket, based on the metric collection's time. + */ + public void addMetrics(MetricCollection metrics) { + Instant bucket = getBucket(metrics); + addMetricsToBucket(metrics, bucket); + } + + /** + * Reset this bucket, clearing all stored values. + */ + public void reset() { + timeBucketedMetrics.clear(); + } + + /** + * Retrieve all values in this collection. The map key is the minute in which the metric values were collected, and the + * map value are all of the metrics that were aggregated during that minute. 
+ */ + public Map> timeBucketedMetrics() { + return timeBucketedMetrics.entrySet() + .stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue().values())); + } + + private Instant getBucket(MetricCollection metrics) { + return metrics.creationTime().truncatedTo(MINUTES); + } + + private void addMetricsToBucket(MetricCollection metrics, Instant bucketId) { + aggregateMetrics(metrics, timeBucketedMetrics.computeIfAbsent(bucketId, i -> new HashMap<>())); + } + + private void aggregateMetrics(MetricCollection metrics, Map bucket) { + List dimensions = dimensions(metrics); + extractAllMetrics(metrics).forEach(metricRecord -> { + MetricAggregatorKey aggregatorKey = new MetricAggregatorKey(metricRecord.metric(), dimensions); + valueFor(metricRecord).ifPresent(metricValue -> { + bucket.computeIfAbsent(aggregatorKey, m -> newAggregator(aggregatorKey)) + .addMetricValue(MetricValueNormalizer.normalize(metricValue)); + }); + }); + } + + private List dimensions(MetricCollection metricCollection) { + List result = new ArrayList<>(); + for (MetricRecord metricRecord : metricCollection) { + if (dimensions.contains(metricRecord.metric())) { + result.add(Dimension.builder() + .name(metricRecord.metric().name()) + .value((String) metricRecord.value()) + .build()); + } + } + + // Sort the dimensions to make sure that the order in the input metric collection doesn't affect the result. + // We use descending order just so that "ServiceName" is before "OperationName" when we use the default dimensions. 
+ result.sort(Comparator.comparing(Dimension::name).reversed()); + return result; + } + + private List> extractAllMetrics(MetricCollection metrics) { + List> result = new ArrayList<>(); + extractAllMetrics(metrics, result); + return result; + } + + private void extractAllMetrics(MetricCollection metrics, List> extractedMetrics) { + for (MetricRecord metric : metrics) { + extractedMetrics.add(metric); + } + metrics.children().forEach(child -> extractAllMetrics(child, extractedMetrics)); + } + + private MetricAggregator newAggregator(MetricAggregatorKey aggregatorKey) { + SdkMetric metric = aggregatorKey.metric(); + StandardUnit metricUnit = unitFor(metric); + if (detailedMetrics.contains(metric)) { + return new DetailedMetricAggregator(aggregatorKey, metricUnit); + } else { + return new SummaryMetricAggregator(aggregatorKey, metricUnit); + } + } + + private StandardUnit unitFor(SdkMetric metric) { + Class metricType = metric.valueClass(); + + if (Duration.class.isAssignableFrom(metricType)) { + return StandardUnit.MILLISECONDS; + } + + return StandardUnit.NONE; + } + + private Optional valueFor(MetricRecord metricRecord) { + if (!shouldReport(metricRecord)) { + return Optional.empty(); + } + + Class metricType = metricRecord.metric().valueClass(); + + if (Duration.class.isAssignableFrom(metricType)) { + Duration durationMetricValue = (Duration) metricRecord.value(); + long millis = durationMetricValue.toMillis(); + return Optional.of((double) millis); + } else if (Number.class.isAssignableFrom(metricType)) { + Number numberMetricValue = (Number) metricRecord.value(); + return Optional.of(numberMetricValue.doubleValue()); + } else if (Boolean.class.isAssignableFrom(metricType)) { + Boolean booleanMetricValue = (Boolean) metricRecord.value(); + return Optional.of(booleanMetricValue ? 
1.0 : 0.0); + } + + return Optional.empty(); + } + + private boolean shouldReport(MetricRecord metricRecord) { + return isSupportedCategory(metricRecord) && isSupportedLevel(metricRecord); + } + + private boolean isSupportedCategory(MetricRecord metricRecord) { + return metricCategoriesContainsAll || + metricRecord.metric() + .categories() + .stream() + .anyMatch(metricCategories::contains); + } + + private boolean isSupportedLevel(MetricRecord metricRecord) { + return metricLevel.includesLevel(metricRecord.metric().level()); + } +} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterHandler.java b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java similarity index 62% rename from test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterHandler.java rename to metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java index e98c7f6d0dc7..4c7360a33f87 100644 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterHandler.java +++ b/metric-publishers/cloudwatch-metric-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/cloudwatch/package-info.java @@ -13,16 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.core.waiters; - -/** - * Callbacks are executed synchronously. That is the same thread the waiter - * completes on and it's not submitted back to the executor. 
- */ -public abstract class WaiterHandler { - - public abstract void onWaitSuccess(InputT request); - - public abstract void onWaitFailure(Exception e); -} +@SdkPreviewApi +package software.amazon.awssdk.metrics.publishers.cloudwatch; +import software.amazon.awssdk.annotations.SdkPreviewApi; \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java new file mode 100644 index 000000000000..316c9762a919 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/CloudWatchMetricPublisherTest.java @@ -0,0 +1,250 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; + +import java.time.Duration; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.Dimension; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataResponse; + +public class CloudWatchMetricPublisherTest { + private CloudWatchAsyncClient cloudWatch; + + private CloudWatchMetricPublisher.Builder publisherBuilder; + + @Before + public void setup() { + cloudWatch = Mockito.mock(CloudWatchAsyncClient.class); + publisherBuilder = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .uploadFrequency(Duration.ofMinutes(60)); + + Mockito.when(cloudWatch.putMetricData(any(PutMetricDataRequest.class))) + .thenReturn(CompletableFuture.completedFuture(PutMetricDataResponse.builder().build())); + } + + @Test + public void noMetricsNoCalls() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.build()) { + publisher.publish(MetricCollector.create("test").collect()); + } + assertNoPutMetricCalls(); + } + 
+ @Test + public void interruptedShutdownStillTerminates() { + CloudWatchMetricPublisher publisher = publisherBuilder.build(); + Thread.currentThread().interrupt(); + publisher.close(); + assertThat(publisher.isShutdown()).isTrue(); + + Thread.interrupted(); // Clear interrupt flag + } + + @Test + public void closeDoesNotCloseConfiguredClient() { + CloudWatchMetricPublisher.builder().cloudWatchClient(cloudWatch).build().close(); + Mockito.verify(cloudWatch, never()).close(); + } + + @Test + public void defaultNamespaceIsCorrect() { + try (CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.namespace()).isEqualTo("AwsSdk/JavaSdk2"); + } + + @Test + public void defaultDimensionsIsCorrect() { + try (CloudWatchMetricPublisher publisher = CloudWatchMetricPublisher.builder() + .cloudWatchClient(cloudWatch) + .build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.metricData().get(0).dimensions()) + .containsExactlyInAnyOrder(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build(), + Dimension.builder() + .name(CoreMetric.OPERATION_NAME.name()) + .value("OperationName") + .build()); + } + + @Test + public void namespaceSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.namespace("namespace").build()) { + MetricCollector collector = newCollector(); + 
collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + assertThat(getPutMetricCall().namespace()).isEqualTo("namespace"); + } + + @Test + public void dimensionsSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.dimensions(CoreMetric.SERVICE_ID).build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + assertThat(call.metricData().get(0).dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + } + + @Test + public void metricCategoriesSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.metricCategories(MetricCategory.HTTP_CLIENT).build()) { + MetricCollector collector = newCollector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, true); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 5); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum metric = call.metricData().get(0); + assertThat(call.metricData()).hasSize(1); + assertThat(metric.dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + assertThat(metric.metricName()).isEqualTo(HttpMetric.AVAILABLE_CONCURRENCY.name()); + } + + @Test + public void metricLevelSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.metricLevel(MetricLevel.INFO).build()) { + MetricCollector collector = newCollector(); + 
collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.API_CALL_SUCCESSFUL, true); + collector.reportMetric(HttpMetric.HTTP_STATUS_CODE, 404); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum metric = call.metricData().get(0); + assertThat(call.metricData()).hasSize(1); + assertThat(metric.dimensions()).containsExactly(Dimension.builder() + .name(CoreMetric.SERVICE_ID.name()) + .value("ServiceId") + .build()); + assertThat(metric.metricName()).isEqualTo(CoreMetric.API_CALL_SUCCESSFUL.name()); + } + + @Test + public void maximumCallsPerPublishSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.maximumCallsPerUpload(1) + .detailedMetrics(HttpMetric.AVAILABLE_CONCURRENCY) + .build()) { + for (int i = 0; i < MetricCollectionAggregator.MAX_VALUES_PER_REQUEST + 1; ++i) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, i); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + } + + assertThat(getPutMetricCalls()).hasSize(1); + } + + @Test + public void detailedMetricsSettingIsHonored() { + try (CloudWatchMetricPublisher publisher = publisherBuilder.detailedMetrics(HttpMetric.AVAILABLE_CONCURRENCY).build()) { + for (int i = 0; i < 10; ++i) { + MetricCollector collector = newCollector(); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 10); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, i); + publisher.publish(new FixedTimeMetricCollection(collector.collect())); + } + } + + PutMetricDataRequest call = getPutMetricCall(); + MetricDatum concurrencyMetric = getDatum(call, HttpMetric.MAX_CONCURRENCY); + MetricDatum availableConcurrency = getDatum(call, HttpMetric.AVAILABLE_CONCURRENCY); + + assertThat(concurrencyMetric.values()).isEmpty(); + assertThat(concurrencyMetric.counts()).isEmpty(); + 
assertThat(concurrencyMetric.statisticValues()).isNotNull(); + + assertThat(availableConcurrency.values()).isNotEmpty(); + assertThat(availableConcurrency.counts()).isNotEmpty(); + assertThat(availableConcurrency.statisticValues()).isNull(); + } + + private MetricDatum getDatum(PutMetricDataRequest call, SdkMetric metric) { + return call.metricData().stream().filter(m -> m.metricName().equals(metric.name())).findAny().get(); + } + + private PutMetricDataRequest getPutMetricCall() { + List calls = getPutMetricCalls(); + assertThat(calls).hasSize(1); + return calls.get(0); + } + + private List getPutMetricCalls() { + ArgumentCaptor captor = ArgumentCaptor.forClass(PutMetricDataRequest.class); + Mockito.verify(cloudWatch).putMetricData(captor.capture()); + return captor.getAllValues(); + } + + private void assertNoPutMetricCalls() { + Mockito.verify(cloudWatch, never()).putMetricData(any(PutMetricDataRequest.class)); + } + + private MetricCollector newCollector() { + return MetricCollector.create("test"); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java new file mode 100644 index 000000000000..3df2fd44c276 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/FixedTimeMetricCollection.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch; + +import java.time.Instant; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricRecord; +import software.amazon.awssdk.metrics.SdkMetric; + +/** + * An implementation of {@link MetricCollection} that sets a static time for the {@link #creationTime()}. This makes it easier + * to test aggregation behavior, because the times can be fixed instead of regenerated each time the {@code MetricCollection} is + * created. + */ +public class FixedTimeMetricCollection implements MetricCollection { + private final MetricCollection delegate; + private final Instant creationTime; + + public FixedTimeMetricCollection(MetricCollection delegate) { + this(delegate, Instant.EPOCH); + } + + public FixedTimeMetricCollection(MetricCollection delegate, + Instant creationTime) { + this.delegate = delegate; + this.creationTime = creationTime; + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public List metricValues(SdkMetric metric) { + return delegate.metricValues(metric); + } + + @Override + public List children() { + return delegate.children() + .stream() + .map(c -> new FixedTimeMetricCollection(c, creationTime)) + .collect(Collectors.toList()); + } + + @Override + public Instant creationTime() { + return creationTime; + } + + @Override + public Iterator> iterator() { + return delegate.iterator(); + } +} diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java 
b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java new file mode 100644 index 000000000000..daaec59916c9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/MetricUploaderTest.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataResponse; + +public class MetricUploaderTest { + private List> putMetricDataResponseFutures = new ArrayList<>(); + + private CloudWatchAsyncClient client; + + private MetricUploader uploader; + + @Before + public void setUp() { + client = Mockito.mock(CloudWatchAsyncClient.class); + uploader = new MetricUploader(client); + + 
Mockito.when(client.putMetricData(any(PutMetricDataRequest.class))).thenAnswer(p -> { + CompletableFuture result = new CompletableFuture<>(); + putMetricDataResponseFutures.add(result); + return result; + }); + } + + @Test + public void uploadSuccessIsPropagated() { + CompletableFuture uploadFuture = uploader.upload(Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build())); + + assertThat(putMetricDataResponseFutures).hasSize(2); + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(0).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(1).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isCompleted(); + } + + @Test + public void uploadFailureIsPropagated() { + CompletableFuture uploadFuture = uploader.upload(Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build())); + + assertThat(putMetricDataResponseFutures).hasSize(2); + assertThat(uploadFuture).isNotCompleted(); + + putMetricDataResponseFutures.get(0).completeExceptionally(new Throwable()); + putMetricDataResponseFutures.get(1).complete(PutMetricDataResponse.builder().build()); + + assertThat(uploadFuture).isCompletedExceptionally(); + } + + @Test + public void closeFalseDoesNotCloseClient() { + uploader.close(false); + Mockito.verify(client, never()).close(); + } + + @Test + public void closeTrueClosesClient() { + uploader.close(true); + Mockito.verify(client, times(1)).close(); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java new file mode 100644 index 000000000000..ec621784c6b8 --- 
/dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/task/UploadMetricsTasksTest.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.task; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.MetricUploader; +import software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform.MetricCollectionAggregator; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; + +public class UploadMetricsTasksTest { + private MetricCollectionAggregator aggregator; + private MetricUploader uploader; + private UploadMetricsTasks task; + + @Before + public void setUp() { + aggregator = Mockito.mock(MetricCollectionAggregator.class); + uploader = Mockito.mock(MetricUploader.class); + task = new UploadMetricsTasks(aggregator, uploader, 2); + } + + + @Test + public void extraTasksAboveMaximumAreDropped() { + List requests = Arrays.asList(PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build(), + PutMetricDataRequest.builder().build()); + 
Mockito.when(aggregator.getRequests()).thenReturn(requests); + task.run(); + + + ArgumentCaptor captor = ArgumentCaptor.forClass(List.class); + Mockito.verify(uploader).upload(captor.capture()); + List uploadedRequests = captor.getValue(); + + assertThat(uploadedRequests).hasSize(2); + assertThat(uploadedRequests).containsOnlyElementsOf(requests); + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java new file mode 100644 index 000000000000..e2d537853811 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/cloudwatch/internal/transform/MetricCollectionAggregatorTest.java @@ -0,0 +1,485 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.metrics.publishers.cloudwatch.internal.transform; + +import static java.time.temporal.ChronoUnit.HOURS; +import static org.assertj.core.api.Assertions.assertThat; + +import java.time.Duration; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.Assert; +import org.junit.Test; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCategory; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.MetricLevel; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.metrics.publishers.cloudwatch.FixedTimeMetricCollection; +import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; +import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; + +public class MetricCollectionAggregatorTest { + private static final String DEFAULT_NAMESPACE = "namespace"; + private static final Set> DEFAULT_DIMENSIONS = Stream.of(CoreMetric.SERVICE_ID, CoreMetric.OPERATION_NAME) + .collect(Collectors.toSet()); + private static final MetricLevel DEFAULT_METRIC_LEVEL = MetricLevel.INFO; + private static final Set DEFAULT_CATEGORIES = Collections.singleton(MetricCategory.HTTP_CLIENT); + private static final Set> DEFAULT_DETAILED_METRICS = Collections.emptySet(); + + @Test + public void maximumRequestsIsHonored() { + List requests; + + requests = aggregatorWithUniqueMetricsAdded(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST).getRequests(); + assertThat(requests).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasSize(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST); + }); + + requests = 
aggregatorWithUniqueMetricsAdded(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST + 1).getRequests(); + assertThat(requests).hasSize(2); + assertThat(requests.get(0).metricData()).hasSize(MetricCollectionAggregator.MAX_METRIC_DATA_PER_REQUEST); + assertThat(requests.get(1).metricData()).hasSize(1); + } + + @Test + public void maximumMetricValuesIsHonored() { + List requests; + + requests = aggregatorWithUniqueValuesAdded(HttpMetric.MAX_CONCURRENCY, + MetricCollectionAggregator.MAX_VALUES_PER_REQUEST).getRequests(); + assertThat(requests).hasSize(1); + validateValuesCount(requests.get(0), MetricCollectionAggregator.MAX_VALUES_PER_REQUEST); + + requests = aggregatorWithUniqueValuesAdded(HttpMetric.MAX_CONCURRENCY, + MetricCollectionAggregator.MAX_VALUES_PER_REQUEST + 1).getRequests(); + assertThat(requests).hasSize(2); + validateValuesCount(requests.get(0), MetricCollectionAggregator.MAX_VALUES_PER_REQUEST); + validateValuesCount(requests.get(1), 1); + } + + private void validateValuesCount(PutMetricDataRequest request, int valuesExpected) { + assertThat(request.metricData().stream().flatMap(m -> m.values().stream())) + .hasSize(valuesExpected); + } + + @Test + public void smallValuesAreNormalizedToZeroWithSummaryMetrics() { + // Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we + // floor those values to 0 to prevent that error. 
+ + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + SdkMetric metric = someMetric(Double.class); + collector.reportMetric(metric, -1E-10); + collector.reportMetric(metric, 1E-10); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + StatisticSet stats = metricData.statisticValues(); + assertThat(stats.minimum()).isEqualTo(0.0); + assertThat(stats.maximum()).isEqualTo(0.0); + assertThat(stats.sum()).isEqualTo(0.0); + assertThat(stats.sampleCount()).isEqualTo(2.0); + }); + }); + } + + @Test + public void smallValuesAreNormalizedToZeroWithDetailedMetrics() { + // Really small values (close to 0) result in CloudWatch failing with an "unsupported value" error. Make sure that we + // floor those values to 0 to prevent that error. + + SdkMetric metric = someMetric(Double.class); + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + + MetricCollector collector = collector(); + collector.reportMetric(metric, -1E-10); + collector.reportMetric(metric, 1E-10); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.values()).hasOnlyOneElementSatisfying(metricValue -> { + assertThat(metricValue).isEqualTo(0.0); + }); + assertThat(metricData.counts()).hasOnlyOneElementSatisfying(metricCount -> { + assertThat(metricCount).isEqualTo(2.0); + }); + }); + }); + } + + @Test + public void dimensionOrderInCollectionDoesNotMatter() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + 
collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.metricData()).hasSize(1); + }); + } + + @Test + public void metricsAreAggregatedByDimensionMetricAndTime() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + collector.reportMetric(HttpMetric.AVAILABLE_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTimeBucket(collector, 0)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(CoreMetric.OPERATION_NAME, "OperationName"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 5); + aggregator.addCollection(collectToFixedTimeBucket(collector, 1)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasSize(5).allSatisfy(data -> { + 
assertThat(data.values()).isEmpty(); + assertThat(data.counts()).isEmpty(); + if (data.dimensions().isEmpty()) { + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(1); + } else if (data.dimensions().size() == 1) { + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(2); + } else { + assertThat(data.dimensions().size()).isEqualTo(2); + if (data.timestamp().equals(Instant.EPOCH)) { + // Time bucket 0 + if (data.metricName().equals(HttpMetric.MAX_CONCURRENCY.name())) { + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(3); + } else { + assertThat(data.metricName()).isEqualTo(HttpMetric.AVAILABLE_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(4); + } + } else { + // Time bucket 1 + assertThat(data.metricName()).isEqualTo(HttpMetric.MAX_CONCURRENCY.name()); + assertThat(data.statisticValues().sampleCount()).isEqualTo(1); + assertThat(data.statisticValues().sum()).isEqualTo(5); + } + } + }); + }); + } + + @Test + public void metricSummariesAreCorrectWithValuesInSameCollector() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + 
assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + assertThat(metricData.values()).isEmpty(); + assertThat(metricData.counts()).isEmpty(); + assertThat(metricData.statisticValues()).isEqualTo(StatisticSet.builder() + .minimum(1.0) + .maximum(4.0) + .sum(14.0) + .sampleCount(5.0) + .build()); + }); + }); + } + + @Test + public void metricSummariesAreCorrectWithValuesInDifferentCollector() { + MetricCollectionAggregator aggregator = defaultAggregator(); + + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + aggregator.addCollection(collectToFixedTime(collector)); + + collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + 
assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + assertThat(metricData.values()).isEmpty(); + assertThat(metricData.counts()).isEmpty(); + assertThat(metricData.statisticValues()).isEqualTo(StatisticSet.builder() + .minimum(1.0) + .maximum(4.0) + .sum(14.0) + .sampleCount(5.0) + .build()); + }); + }); + } + + @Test + public void detailedMetricsAreCorrect() { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(HttpMetric.MAX_CONCURRENCY); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 2); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 4); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 3); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasOnlyOneElementSatisfying(request -> { + assertThat(request.namespace()).isEqualTo(DEFAULT_NAMESPACE); + assertThat(request.metricData()).hasOnlyOneElementSatisfying(metricData -> { + assertThat(metricData.dimensions()).hasOnlyOneElementSatisfying(dimension -> { + assertThat(dimension.name()).isEqualTo(CoreMetric.SERVICE_ID.name()); + assertThat(dimension.value()).isEqualTo("ServiceId"); + }); + + assertThat(metricData.values()).hasSize(4); + assertThat(metricData.statisticValues()).isNull(); + for (int i = 0; i < metricData.values().size(); i++) { + Double value = metricData.values().get(i); + Double count = metricData.counts().get(i); + switch (value.toString()) { + case "1.0": + case "2.0": + case "3.0": + 
assertThat(count).isEqualTo(1.0); + break; + case "4.0": + assertThat(count).isEqualTo(2.0); + break; + default: + Assert.fail(); + } + } + }); + }); + } + + @Test + public void metricsFromOtherCategoriesAreIgnored() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.HTTP_STATUS_CODE, 404); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).isEmpty(); + } + + @Test + public void getRequestsResetsState() { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + collector.reportMetric(CoreMetric.SERVICE_ID, "ServiceId"); + collector.reportMetric(HttpMetric.MAX_CONCURRENCY, 1); + aggregator.addCollection(collectToFixedTime(collector)); + + assertThat(aggregator.getRequests()).hasSize(1); + assertThat(aggregator.getRequests()).isEmpty(); + } + + @Test + public void numberTypesAreTransformedCorrectly() { + SdkMetric metric = someMetric(CustomNumber.class); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(-1000.5))).isEqualTo(-1000.5); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(0))).isEqualTo(0); + assertThat(transformMetricValueUsingAggregator(metric, new CustomNumber(1000.5))).isEqualTo(1000.5); + } + + @Test + public void durationsAreTransformedCorrectly() { + SdkMetric metric = someMetric(Duration.class); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(-10))).isEqualTo(-10_000); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(0))).isEqualTo(0); + assertThat(transformMetricValueUsingAggregator(metric, Duration.ofSeconds(10))).isEqualTo(10_000); + } + + @Test + public void booleansAreTransformedCorrectly() { + SdkMetric metric = someMetric(Boolean.class); + 
assertThat(transformMetricValueUsingAggregator(metric, false)).isEqualTo(0.0); + assertThat(transformMetricValueUsingAggregator(metric, true)).isEqualTo(1.0); + } + + private Double transformMetricValueUsingAggregator(SdkMetric metric, T input) { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + MetricCollector collector = collector(); + collector.reportMetric(metric, input); + aggregator.addCollection(collectToFixedTime(collector)); + + return aggregator.getRequests().get(0).metricData().get(0).values().get(0); + } + + private MetricCollectionAggregator aggregatorWithUniqueValuesAdded(SdkMetric metric, int numValues) { + MetricCollectionAggregator aggregator = aggregatorWithCustomDetailedMetrics(metric); + for (int i = 0; i < numValues; i++) { + MetricCollector collector = collector(); + collector.reportMetric(metric, i); + aggregator.addCollection(collectToFixedTime(collector)); + } + return aggregator; + } + + private MetricCollectionAggregator aggregatorWithUniqueMetricsAdded(int numMetrics) { + MetricCollectionAggregator aggregator = defaultAggregator(); + MetricCollector collector = collector(); + for (int i = 0; i < numMetrics; i++) { + collector.reportMetric(someMetric(), 0); + } + aggregator.addCollection(collectToFixedTime(collector)); + return aggregator; + } + + private MetricCollectionAggregator defaultAggregator() { + return new MetricCollectionAggregator(DEFAULT_NAMESPACE, + DEFAULT_DIMENSIONS, + DEFAULT_CATEGORIES, + DEFAULT_METRIC_LEVEL, + DEFAULT_DETAILED_METRICS); + } + + private MetricCollectionAggregator aggregatorWithCustomDetailedMetrics(SdkMetric... 
detailedMetrics) { + return new MetricCollectionAggregator(DEFAULT_NAMESPACE, + DEFAULT_DIMENSIONS, + DEFAULT_CATEGORIES, + DEFAULT_METRIC_LEVEL, + Stream.of(detailedMetrics).collect(Collectors.toSet())); + } + + private MetricCollector collector() { + return MetricCollector.create("test"); + } + + private SdkMetric someMetric() { + return someMetric(Integer.class); + } + + private SdkMetric someMetric(Class clazz) { + return SdkMetric.create(getClass().getSimpleName() + UUID.randomUUID().toString(), + clazz, + MetricLevel.INFO, + MetricCategory.HTTP_CLIENT); + } + + private MetricCollection collectToFixedTime(MetricCollector collector) { + return new FixedTimeMetricCollection(collector.collect()); + } + + private MetricCollection collectToFixedTimeBucket(MetricCollector collector, int timeBucket) { + // Make sure collectors in different "time buckets" are in a different minute than other collectors. We also offset the + // hour by a few seconds, to make sure the metric collection aggregator is actually ignoring the "seconds" portion of + // the collection time. 
+ Instant metricTime = Instant.EPOCH.plus(timeBucket, HOURS) + .plusSeconds(Math.max(59, timeBucket)); + return new FixedTimeMetricCollection(collector.collect(), metricTime); + } + + private static class CustomNumber extends Number { + private final double value; + + public CustomNumber(double value) { + this.value = value; + } + + @Override + public int intValue() { + throw new UnsupportedOperationException(); + } + + @Override + public long longValue() { + throw new UnsupportedOperationException(); + } + + @Override + public float floatValue() { + throw new UnsupportedOperationException(); + } + + @Override + public double doubleValue() { + return value; + } + } +} \ No newline at end of file diff --git a/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties b/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties new file mode 100644 index 000000000000..6fa311bc45f9 --- /dev/null +++ b/metric-publishers/cloudwatch-metric-publisher/src/test/resources/log4j.properties @@ -0,0 +1,35 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +log4j.rootLogger=INFO, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# Adjust to see more / less logging +#log4j.logger.com.amazonaws.ec2=DEBUG + +# HttpClient 3 Wire Logging +#log4j.logger.httpclient.wire=DEBUG + +# HttpClient 4 Wire Logging +# log4j.logger.org.apache.http.wire=INFO +# log4j.logger.org.apache.http=DEBUG +# log4j.logger.org.apache.http.wire=DEBUG +# log4j.logger.software.amazonaws.awssdk=DEBUG + + diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml new file mode 100644 index 000000000000..02970cc2ed98 --- /dev/null +++ b/metric-publishers/pom.xml @@ -0,0 +1,101 @@ + + + 4.0.0 + + software.amazon.awssdk + aws-sdk-java-pom + 2.13.56-SNAPSHOT + + + metric-publishers + AWS Java SDK :: Metric Publishers + pom + + + cloudwatch-metric-publisher + + + + + + software.amazon.awssdk + bom-internal + ${awsjavasdk.version} + pom + import + + + + + + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + + + software.amazon.awssdk + utils + ${awsjavasdk.version} + + + junit + junit + test + + + log4j + log4j + test + + + org.slf4j + slf4j-log4j12 + test + + + io.reactivex.rxjava2 + rxjava + test + + + org.assertj + assertj-core + test + + + software.amazon.awssdk + test-utils + ${awsjavasdk.version} + test + + + org.hamcrest + hamcrest-all + test + + + wiremock + com.github.tomakehurst + test + + + mockito-core + org.mockito + test + + + diff --git a/pom.xml b/pom.xml index f3c5728315fc..10cdf34e9c48 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -62,6 +62,7 @@ codegen-maven-plugin bundle build-tools + metric-publishers release-scripts utils codegen-lite @@ -83,8 +84,7 @@ ${project.version} 
- 2.10.0 - 2.9.0 + 2.10.4 1.0.1 1.2.0 3.4 @@ -108,6 +108,7 @@ 2.1.9 1.10 1.21 + 0.6.3 4.12 @@ -131,6 +132,7 @@ 2.22.2 3.1.1 3.0.1 + yyyy 3.1.1 1.6 8.29 @@ -1060,7 +1062,9 @@ software.amazon.awssdk* - +

AWS SDK for Java API Reference - ${project.version}]]>
+
+ Copyright © ${maven.build.timestamp} Amazon Web Services, Inc. All Rights Reserved.]]> diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index c87a547bc4af..562a4eeabe8f 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/README.md b/services-custom/dynamodb-enhanced/README.md index d7bdecbc1c1e..f76e3776af09 100644 --- a/services-custom/dynamodb-enhanced/README.md +++ b/services-custom/dynamodb-enhanced/README.md @@ -2,10 +2,6 @@ Mid-level DynamoDB mapper/abstraction for Java using the v2 AWS SDK. -Warning: This package is provided for preview and comment purposes only. -It is not asserted to be stable or correct, and is subject to frequent -breaking changes. - ## Getting Started All the examples below use a fictional Customer class. This class is completely made up and not part of this library. Any search or key @@ -159,7 +155,6 @@ index. Here's an example of how to do this: PageIterable customersWithName = customersByName.query(r -> r.queryConditional(equalTo(k -> k.partitionValue("Smith")))); ``` - ### Non-blocking asynchronous operations If your application requires non-blocking asynchronous calls to DynamoDb, then you can use the asynchronous implementation of the @@ -195,8 +190,7 @@ key differences: // Perform other work and let the processor handle the results asynchronously ``` - -### Using extensions +## Using extensions The mapper supports plugin extensions to provide enhanced functionality beyond the simple primitive mapped operations. 
Extensions have two hooks, beforeWrite() and afterRead(); the former can modify a write operation before it happens, @@ -221,7 +215,7 @@ DynamoDbEnhancedClient enhancedClient = .build(); ``` -#### VersionedRecordExtension +### VersionedRecordExtension This extension is loaded by default and will increment and track a record version number as records are written to the database. A condition will be added to every @@ -248,14 +242,143 @@ Or using a StaticTableSchema: .tags(versionAttribute()) ``` -## Advanced StaticTableSchema scenarios +## Advanced table schema features +### Explicitly include/exclude attributes in DDB mapping +#### Excluding attributes +Ignore attributes that should not participate in mapping to DDB +Mark the attribute with the @DynamoDbIgnore annotation: +```java +private String internalKey; + +@DynamoDbIgnore +public String getInternalKey() { return this.internalKey; } +public void setInternalKey(String internalKey) { return this.internalKey = internalKey;} +``` +#### Including attributes +Change the name used to store an attribute in DBB by explicitly marking it with the + @DynamoDbAttribute annotation and supplying a different name: +```java +private String internalKey; + +@DynamoDbAttribute("renamedInternalKey") +public String getInternalKey() { return this.internalKey; } +public void setInternalKey(String internalKey) { return this.internalKey = internalKey;} +``` + +### Control attribute conversion +By default, the table schema provides converters for all primitive and many common Java types +through a default implementation of the AttributeConverterProvider interface. This behavior +can be changed both at the attribute converter provider level as well as for a single attribute. + +#### Provide custom attribute converter providers +You can provide a single AttributeConverterProvider or a chain of ordered AttributeConverterProviders +through the @DynamoDbBean 'converterProviders' annotation. 
Any custom AttributeConverterProvider must extend the AttributeConverterProvider +interface. + +Note that if you supply your own chain of attribute converter providers, you will override +the default converter provider (DefaultAttributeConverterProvider) and must therefore include it in the chain if you wish to +use its attribute converters. It's also possible to annotate the bean with an empty array `{}`, thus +disabling the usage of any attribute converter providers including the default, in which case +all attributes must have their own attribute converters (see below). + +Single converter provider: +```java +@DynamoDbBean(converterProviders = ConverterProvider1.class) +public class Customer { + +} +``` + +Chain of converter providers ending with the default (least priority): +```java +@DynamoDbBean(converterProviders = { + ConverterProvider1.class, + ConverterProvider2.class, + DefaultAttributeConverterProvider.class}) +public class Customer { + +} +``` + +In the same way, adding a chain of attribute converter providers directly to a StaticTableSchema: +```java +private static final StaticTableSchema CUSTOMER_TABLE_SCHEMA = + StaticTableSchema.builder(Customer.class) + .newItemSupplier(Customer::new) + .addAttribute(String.class, a -> a.name("name") + a.getter(Customer::getName) + a.setter(Customer::setName)) + .attributeConverterProviders(converterProvider1, converterProvider2) + .build(); +``` + +#### Override the mapping of a single attribute +Supply an AttributeConverter when creating the attribute to directly override any +converters provided by the table schema AttributeConverterProviders. Note that you will +only add a custom converter for that attribute; other attributes, even of the same +type, will not use that converter unless explicitly specified for those other attributes. 
+ +Example: +```java +@DynamoDbBean +public class Customer { + private String name; + + @DynamoDbConvertedBy(CustomAttributeConverter.class) + public String getName() { return this.name; } + public void setName(String name) { this.name = name;} +} +``` +For StaticTableSchema: +```java +private static final StaticTableSchema CUSTOMER_TABLE_SCHEMA = + StaticTableSchema.builder(Customer.class) + .newItemSupplier(Customer::new) + .addAttribute(String.class, a -> a.name("name") + a.getter(Customer::getName) + a.setter(Customer::setName) + a.attributeConverter(customAttributeConverter)) + .build(); +``` + ### Flat map attributes from another class If the attributes for your table record are spread across several different Java objects, either through inheritance or composition, the static TableSchema implementation gives you a method of flat mapping those attributes and rolling them up into a single schema. -To accomplish this using inheritance:- +#### Using inheritance +To accomplish flat map using inheritance, the only requirement is that +both classes are annotated as a DynamoDb bean: + +```java +@DynamoDbBean +public class Customer extends GenericRecord { + private String name; + private GenericRecord record; + + public String getName() { return this.name; } + public void setName(String name) { this.name = name;} + + public String getRecord() { return this.record; } + public void setRecord(String record) { this.record = record;} +} + +@DynamoDbBean +public abstract class GenericRecord { + private String id; + private String createdDate; + + public String getId() { return this.id; } + public void setId(String id) { this.id = id;} + + public String getCreatedDate() { return this.createdDate; } + public void setCreatedDate(String createdDate) { this.createdDate = createdDate;} +} + +``` + +For StaticTableSchema, use the 'extend' feature to achieve the same effect: ```java @Data public class Customer extends GenericRecord { @@ -270,53 +393,96 @@ public abstract class 
GenericRecord { private static final StaticTableSchema GENERIC_RECORD_SCHEMA = StaticTableSchema.builder(GenericRecord.class) - .attributes( - // The partition key will be inherited by the top level mapper - stringAttribute("id", GenericRecord::getId, GenericRecord::setId).as(primaryPartitionKey()), - stringAttribute("created_date", GenericRecord::getCreatedDate, GenericRecord::setCreatedDate)) - .build(); + // The partition key will be inherited by the top level mapper + .addAttribute(String.class, a -> a.name("id") + .getter(GenericRecord::getId) + .setter(GenericRecord::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("created_date") + .getter(GenericRecord::getCreatedDate) + .setter(GenericRecord::setCreatedDate)) + .build(); private static final StaticTableSchema CUSTOMER_TABLE_SCHEMA = StaticTableSchema.builder(Customer.class) .newItemSupplier(Customer::new) - .attributes( - stringAttribute("name", Customer::getName, Customer::setName)) + .addAttribute(String.class, a -> a.name("name") + .getter(Customer::getName) + .setter(Customer::setName)) .extend(GENERIC_RECORD_SCHEMA) // All the attributes of the GenericRecord schema are added to Customer .build(); ``` +#### Using composition + +Using composition, the @DynamoDbFlatten annotation flat maps the composite class: +```java +@DynamoDbBean +public class Customer { + private String name; + private GenericRecord record; + + public String getName() { return this.name; } + public void setName(String name) { this.name = name;} + + @DynamoDbFlatten(dynamoDbBeanClass = GenericRecord.class) + public String getRecord() { return this.record; } + public void setRecord(String record) { this.record = record;} +} + +@DynamoDbBean +public class GenericRecord { + private String id; + private String createdDate; + + public String getId() { return this.id; } + public void setId(String id) { this.id = id;} + + public String getCreatedDate() { return this.createdDate; } + public void 
setCreatedDate(String createdDate) { this.createdDate = createdDate;} +} +``` +You can flatten as many different eligible classes as you like using the flatten annotation. +The only constraints are that attributes must not have the same name when they are being rolled +together, and there must never be more than one partition key, sort key or table name. + +Flat map composite classes using StaticTableSchema: -Using composition: ```java @Data public class Customer{ private String name; private GenericRecord recordMetadata; + //getters and setters for all attributes } @Data public class GenericRecord { private String id; private String createdDate; + //getters and setters for all attributes } private static final StaticTableSchema GENERIC_RECORD_SCHEMA = StaticTableSchema.builder(GenericRecord.class) - .newItemSupplier(GenericRecord::new) - .attributes( - stringAttribute("id", GenericRecord::getId, GenericRecord::setId).as(primaryPartitionKey()), - stringAttribute("created_date", GenericRecord::getCreatedDate, GenericRecord::setCreatedDate)) - .build(); + .addAttribute(String.class, a -> a.name("id") + .getter(GenericRecord::getId) + .setter(GenericRecord::setId) + .tags(primaryPartitionKey())) + .addAttribute(String.class, a -> a.name("created_date") + .getter(GenericRecord::getCreatedDate) + .setter(GenericRecord::setCreatedDate)) + .build(); private static final StaticTableSchema CUSTOMER_TABLE_SCHEMA = StaticTableSchema.builder(Customer.class) .newItemSupplier(Customer::new) - .attributes(stringAttribute("name", Customer::getName, Customer::setName)) + .addAttribute(String.class, a -> a.name("name") + .getter(Customer::getName) + .setter(Customer::setName)) // Because we are flattening a component object, we supply a getter and setter so the // mapper knows how to access it - .flatten(CUSTOMER_TABLE_SCHEMA, Customer::getRecordMetadata, Customer::setRecordMetadata) + .flatten(GENERIC_RECORD_SCHEMA, Customer::getRecordMetadata, Customer::setRecordMetadata) 
.build(); ``` -You can flatten as many different eligible classes as you like using the -builder pattern. The only constraints are that attributes must not have -the same name when they are being rolled together, and there must never -be more than one partition key, sort key or table name. \ No newline at end of file +Just as for annotations, you can flatten as many different eligible classes as you like using the +builder pattern. \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index 4a15090871b9..91865cbf0e1d 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,10 +21,10 @@ software.amazon.awssdk services-custom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT dynamodb-enhanced - ${awsjavasdk.version}-PREVIEW + ${awsjavasdk.version} AWS Java SDK :: DynamoDB :: Enhanced Client https://aws.amazon.com/sdkforjava diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java index bada6cc68834..de36eab8c66f 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverter.java @@ -18,7 +18,6 @@ import java.time.Instant; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsIntegerAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter; import 
software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -31,8 +30,6 @@ *
    *
  • The {@link StringAttributeConverter} converts a {@link String} into a DynamoDB string * ({@link software.amazon.awssdk.services.dynamodb.model.AttributeValue#s()}).
  • - *
  • The {@link InstantAsIntegerAttributeConverter} converts an {@link Instant} into a DynamoDB number - * ({@link software.amazon.awssdk.services.dynamodb.model.AttributeValue#n()}).
  • *
  • The {@link InstantAsStringAttributeConverter} converts an {@link Instant} into a DynamoDB string * ({@link software.amazon.awssdk.services.dynamodb.model.AttributeValue#s()}).
  • *
@@ -67,7 +64,8 @@ public interface AttributeConverter { * InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create(); * assertEquals(converter.transformTo(EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z").toAttributeValue()), * Instant.EPOCH); - *
+     * }
+     * 
*/ T transformTo(AttributeValue input); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java index 813ebf1a54ea..a20608535895 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/AttributeConverterProvider.java @@ -16,7 +16,7 @@ package software.amazon.awssdk.enhanced.dynamodb; import software.amazon.awssdk.annotations.SdkPublicApi; -import software.amazon.awssdk.enhanced.dynamodb.internal.DefaultAttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterProviderResolver; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** @@ -40,6 +40,6 @@ public interface AttributeConverterProvider { * standard Java type converters included. 
*/ static AttributeConverterProvider defaultProvider() { - return DefaultAttributeConverterProvider.create(); + return ConverterProviderResolver.defaultConverterProvider(); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultAttributeConverterProvider.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java similarity index 92% rename from services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultAttributeConverterProvider.java rename to services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java index 7b358b1e4143..34a3f159750c 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultAttributeConverterProvider.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DefaultAttributeConverterProvider.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.enhanced.dynamodb.internal; +package software.amazon.awssdk.enhanced.dynamodb; import java.util.ArrayList; import java.util.List; @@ -22,11 +22,8 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import software.amazon.awssdk.annotations.Immutable; -import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; -import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.PrimitiveConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.StringConverterProvider; @@ -46,7 +43,7 @@ import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.DurationAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnumAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.FloatAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsIntegerAttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.IntegerAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ListAttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateAttributeConverter; @@ -76,11 +73,15 @@ import software.amazon.awssdk.utils.Validate; /** + * This class is the default attribute converter provider in the DDB Enhanced library. 
When instantiated + * using the constructor {@link #DefaultAttributeConverterProvider()} or the {@link #create()} method, it's loaded + * with the currently supported attribute converters in the library. *

- * Given an input, this will identify a converter that can convert the specific Java type and invoke it. If a converter cannot - * be found, it will invoke a "parent" converter, which would be expected to be able to convert the value (or throw an exception). + * Given an input, the method {@link #converterFor(EnhancedType)} will identify a converter that can convert the + * specific Java type and invoke it. If a converter cannot be found, it will invoke a "parent" converter, + * which would be expected to be able to convert the value (or throw an exception). */ -@SdkInternalApi +@SdkPublicApi @ThreadSafe @Immutable public final class DefaultAttributeConverterProvider implements AttributeConverterProvider { @@ -102,6 +103,21 @@ private DefaultAttributeConverterProvider(Builder builder) { } } + /** + * Returns an attribute converter provider with all default converters set. + */ + public DefaultAttributeConverterProvider() { + this(getDefaultBuilder()); + } + + /** + * Returns an attribute converter provider with all default converters set. + */ + public static DefaultAttributeConverterProvider create() { + return getDefaultBuilder().build(); + } + + /** * Equivalent to {@code builder(EnhancedType.of(Object.class))}. 
*/ @@ -179,7 +195,7 @@ private AttributeConverter createSetConverter(EnhancedType type) { return (AttributeConverter) SetAttributeConverter.setConverter(innerConverter); } - public static DefaultAttributeConverterProvider create() { + private static Builder getDefaultBuilder() { return DefaultAttributeConverterProvider.builder() .addConverter(AtomicBooleanAttributeConverter.create()) .addConverter(AtomicIntegerAttributeConverter.create()) @@ -195,7 +211,7 @@ public static DefaultAttributeConverterProvider create() { .addConverter(DoubleAttributeConverter.create()) .addConverter(DurationAttributeConverter.create()) .addConverter(FloatAttributeConverter.create()) - .addConverter(InstantAsIntegerAttributeConverter.create()) + .addConverter(InstantAsStringAttributeConverter.create()) .addConverter(IntegerAttributeConverter.create()) .addConverter(LocalDateAttributeConverter.create()) .addConverter(LocalDateTimeAttributeConverter.create()) @@ -217,8 +233,7 @@ public static DefaultAttributeConverterProvider create() { .addConverter(UuidAttributeConverter.create()) .addConverter(ZonedDateTimeAsStringAttributeConverter.create()) .addConverter(ZoneIdAttributeConverter.create()) - .addConverter(ZoneOffsetAttributeConverter.create()) - .build(); + .addConverter(ZoneOffsetAttributeConverter.create()); } /** diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java index 885fde63d075..eb5a4bf15169 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbAsyncTable.java @@ -356,6 +356,7 @@ default CompletableFuture getItem(T keyItem) { * .build()); * publisher.subscribe(page -> page.items().forEach(item -> 
System.out.println(item))); * } + * *

* 2) Subscribing to items across all pages *

diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java
index f1b5edb4cdaf..960386d8406f 100644
--- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java
+++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/DynamoDbExtensionContext.java
@@ -17,7 +17,6 @@
 
 import java.util.Map;
 import software.amazon.awssdk.annotations.SdkPublicApi;
-import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext;
 import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
 
 /**
diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java
index 62e0446c1148..c5761e5358f9 100644
--- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java
+++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedType.java
@@ -64,7 +64,7 @@ public class EnhancedType {
      * Create a type token, capturing the generic type arguments of the token as {@link Class}es.
      *
      * 

- * This must be called from an anonymous subclass. For example, + * This must be called from an anonymous subclass. For example, * {@code new EnhancedType>(){}} (note the extra {}) for a {@code EnhancedType>}. */ protected EnhancedType() { @@ -508,7 +508,7 @@ private StringBuilder innerToString() { StringBuilder result = new StringBuilder(); result.append(rawClass.getTypeName()); - if (!rawClassParameters.isEmpty()) { + if (null != rawClassParameters && !rawClassParameters.isEmpty()) { result.append("<"); result.append(rawClassParameters.stream().map(EnhancedType::innerToString).collect(Collectors.joining(", "))); result.append(">"); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java new file mode 100644 index 000000000000..f0e48375bd8e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/OperationContext.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb; + +import software.amazon.awssdk.annotations.SdkPublicApi; + +/** + * A context object that is associated with a specific operation and identifies the resources that the operation is + * meant to operate on. + *

+ * This context is passed to and can be read by extension hooks (see {@link DynamoDbEnhancedClientExtension}). + */ +@SdkPublicApi +public interface OperationContext { + /** + * The name of the table being operated on + */ + String tableName(); + + /** + * The name of the index within the table being operated on. If it is the primary index, then this value will be + * set to the constant {@link TableMetadata#primaryIndexName()}. + */ + String indexName(); +} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java index 3a16dbbf689b..fe2fcae88502 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocument.java @@ -21,7 +21,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.Document; import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @SdkInternalApi @@ -39,7 +39,7 @@ public static DefaultDocument create(Map itemMap) { public T getItem(MappedTableResource mappedTableResource) { return readAndTransformSingleItem(itemMap, mappedTableResource.tableSchema(), - OperationContext.create(mappedTableResource.tableName()), + DefaultOperationContext.create(mappedTableResource.tableName()), mappedTableResource.mapperExtension()); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java index 1287b638c426..c1d4c387a42b 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtils.java @@ -25,10 +25,10 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.model.Page; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -50,7 +50,11 @@ public static String cleanAttributeName(String key) { char[] chars = key.toCharArray(); for (int i = 0; i < chars.length; ++i) { - if (chars[i] == '*' || chars[i] == '.' || chars[i] == '-') { + if (chars[i] == '*' + || chars[i] == '.' 
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.awssdk.enhanced.dynamodb.internal.converter;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;

/**
 * An {@link AttributeConverterProvider} that allows multiple providers to be chained in a specified order
 * to act as a single composite provider. When searching for an attribute converter for a type,
 * the providers will be called in forward/ascending order, attempting to find a converter from the
 * first provider, then the second, and so on, until a match is found or the chain is exhausted.
 */
@SdkInternalApi
public final class ChainConverterProvider implements AttributeConverterProvider {
    // Defensive copy: callers may mutate the list they passed in after construction.
    private final List<AttributeConverterProvider> providerChain;

    private ChainConverterProvider(List<AttributeConverterProvider> providers) {
        this.providerChain = new ArrayList<>(providers);
    }

    /**
     * Construct a new instance of {@link ChainConverterProvider}.
     *
     * @param providers A list of {@link AttributeConverterProvider} to chain together, highest priority first.
     * @return A constructed {@link ChainConverterProvider} object.
     */
    public static ChainConverterProvider create(AttributeConverterProvider... providers) {
        return new ChainConverterProvider(Arrays.asList(providers));
    }

    /**
     * Construct a new instance of {@link ChainConverterProvider}.
     *
     * @param providers A list of {@link AttributeConverterProvider} to chain together, highest priority first.
     * @return A constructed {@link ChainConverterProvider} object.
     */
    public static ChainConverterProvider create(List<AttributeConverterProvider> providers) {
        return new ChainConverterProvider(providers);
    }

    /**
     * Returns an unmodifiable view of the providers in this chain, in lookup order.
     */
    public List<AttributeConverterProvider> chainedProviders() {
        return Collections.unmodifiableList(this.providerChain);
    }

    /**
     * Returns the first non-null converter found by walking the provider chain in order,
     * or null if no provider in the chain can supply a converter for the given type.
     */
    @Override
    public <T> AttributeConverter<T> converterFor(EnhancedType<T> enhancedType) {
        // Map first, then filter: each provider's converterFor() is invoked exactly once
        // per lookup. (A filter-then-map form would call it twice per matching provider,
        // which is wasteful and assumes providers are idempotent.)
        return this.providerChain.stream()
                                 .map(provider -> provider.converterFor(enhancedType))
                                 .filter(Objects::nonNull)
                                 .findFirst()
                                 .orElse(null);
    }
}
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.awssdk.enhanced.dynamodb.internal.converter;

import java.util.List;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider;
import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider;

/**
 * Static module to assist with the initialization of attribute converter providers for a StaticTableSchema.
 */
@SdkInternalApi
public final class ConverterProviderResolver {

    // Shared singleton: the default provider is stateless and safe to reuse across schemas.
    private static final AttributeConverterProvider DEFAULT_ATTRIBUTE_CONVERTER =
        DefaultAttributeConverterProvider.create();

    private ConverterProviderResolver() {
    }

    /**
     * Static provider for the default attribute converters that are bundled with the DynamoDB Enhanced Client.
     * This provider will be used by default unless overridden in the static table schema builder or using bean
     * annotations.
     *
     * @return The shared default {@link AttributeConverterProvider} instance.
     */
    public static AttributeConverterProvider defaultConverterProvider() {
        return DEFAULT_ATTRIBUTE_CONVERTER;
    }

    /**
     * Resolves a list of attribute converter providers into a single provider. If the list is a singleton,
     * it will just return that provider, otherwise it will combine them into a
     * {@link ChainConverterProvider} using the order provided in the list.
     *
     * @param providers A list of providers to be combined in strict order
     * @return A single provider that combines all the supplied providers, or null if no providers were supplied
     */
    public static AttributeConverterProvider resolveProviders(List<AttributeConverterProvider> providers) {
        if (providers == null || providers.isEmpty()) {
            return null;
        }

        if (providers.size() == 1) {
            return providers.get(0);
        }

        return ChainConverterProvider.create(providers);
    }
}
Integer.toString(valueToPad) : "0" + valueToPad; - } - public static String padLeft(int paddingAmount, int valueToPad) { String value = Integer.toString(valueToPad); int padding = paddingAmount - value.length(); @@ -64,60 +63,6 @@ public static String padLeft(int paddingAmount, int valueToPad) { return result.toString(); } - public static String padRight(int paddingAmount, String valueToPad) { - StringBuilder result = new StringBuilder(paddingAmount); - result.append(valueToPad); - for (int i = result.length(); i < paddingAmount; i++) { - result.append('0'); - } - return result.toString(); - } - - public static String trimNumber(String number) { - int startInclusive = findTrimInclusiveStart(number, '0', 0); - - if (startInclusive >= number.length()) { - return "0"; - } - - if (!number.contains(".")) { - return number.substring(startInclusive); - } - - int endExclusive = findTrimExclusiveEnd(number, '0', number.length()); - endExclusive = findTrimExclusiveEnd(number, '.', endExclusive); - - if (startInclusive >= endExclusive) { - return "0"; - } - - String result = number.substring(startInclusive, endExclusive); - if (result.startsWith(".")) { - return "0" + result; - } - return result; - } - - private static int findTrimInclusiveStart(String string, char characterToTrim, int startingIndex) { - int startInclusive = startingIndex; - - while (startInclusive < string.length() && string.charAt(startInclusive) == characterToTrim) { - ++startInclusive; - } - - return startInclusive; - } - - private static int findTrimExclusiveEnd(String string, char characterToTrim, int startingIndex) { - int endExclusive = startingIndex; - - while (endExclusive > 0 && string.charAt(endExclusive - 1) == characterToTrim) { - --endExclusive; - } - - return endExclusive; - } - public static String[] splitNumberOnDecimal(String valueToSplit) { int i = valueToSplit.indexOf('.'); if (i == -1) { @@ -128,55 +73,8 @@ public static String[] splitNumberOnDecimal(String valueToSplit) { } } - public 
static String[] chunk(String valueToChunk, int... splitSizes) { - String[] result = new String[splitSizes.length + 1]; - int splitStartInclusive = chunkLeft(valueToChunk, result, splitSizes); - - Validate.isTrue(splitStartInclusive == valueToChunk.length(), "Value size does not match expected chunking scheme."); - - return result; - } - - public static String[] chunkWithRightOverflow(String valueToChunk, int... splitSizesFromLeft) { - String[] result = new String[splitSizesFromLeft.length + 1]; - int splitStartInclusive = chunkLeft(valueToChunk, result, splitSizesFromLeft); - - result[splitSizesFromLeft.length] = valueToChunk.substring(splitStartInclusive); - - return result; + public static LocalDateTime convertFromLocalDate(LocalDate localDate) { + return LocalDateTime.of(localDate, LocalTime.MIDNIGHT); } - public static String[] chunkWithLeftOverflow(String valueToChunk, int... splitSizesFromRight) { - try { - String[] result = new String[splitSizesFromRight.length + 1]; - int splitEndExclusive = valueToChunk.length(); - - for (int i = splitSizesFromRight.length - 1; i >= 0; i--) { - int splitStartInclusive = splitEndExclusive - splitSizesFromRight[i]; - result[i + 1] = valueToChunk.substring(splitStartInclusive, splitEndExclusive); - splitEndExclusive = splitStartInclusive; - } - - result[0] = valueToChunk.substring(0, splitEndExclusive); - - return result; - } catch (StringIndexOutOfBoundsException e) { - throw new IllegalArgumentException("Invalid format for value.", e); - } - } - - private static int chunkLeft(String valueToChunk, String[] result, int[] splitSizes) { - try { - int splitStartInclusive = 0; - - for (int i = 0; i < splitSizes.length; i++) { - int splitEndExclusive = splitStartInclusive + splitSizes[i]; - result[i] = valueToChunk.substring(splitStartInclusive, splitEndExclusive); - splitStartInclusive = splitEndExclusive; - } - return splitStartInclusive; - } catch (StringIndexOutOfBoundsException e) { - throw new 
IllegalArgumentException("Invalid format for value.", e); - } - } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TimeConversion.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TimeConversion.java deleted file mode 100644 index c2501ce15fa2..000000000000 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/TimeConversion.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.enhanced.dynamodb.internal.converter; - -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padRight; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.splitNumberOnDecimal; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.trimNumber; - -import java.time.DateTimeException; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeParseException; -import java.time.temporal.TemporalQuery; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.utils.Validate; - -@SdkInternalApi -public final class TimeConversion { - private static final InstantVisitor INSTANT_VISITOR = new InstantVisitor(); - private static final OffsetDateTimeVisitor OFFSET_DATE_TIME_VISITOR = new OffsetDateTimeVisitor(); - private static final ZonedDateTimeVisitor ZONED_DATE_TIME_VISITOR = new ZonedDateTimeVisitor(); - - private TimeConversion() { - } - - public static AttributeValue toIntegerAttributeValue(Instant instant) { - long instantSeconds = instant.getEpochSecond(); - int nanos = instant.getNano(); - - String value; - if (nanos == 0) { - value = Long.toString(instantSeconds); - } else if (instantSeconds >= 0) { - value = instantSeconds + - "." + padLeft(9, nanos); - } else { - instantSeconds++; - nanos = 1_000_000_000 - nanos; - - value = "-" + - Math.abs(instantSeconds) + - "." 
+ padLeft(9, nanos); - } - - return AttributeValue.builder().n(trimNumber(value)).build(); - } - - public static AttributeValue toStringAttributeValue(Instant instant) { - return AttributeValue.builder().s(DateTimeFormatter.ISO_INSTANT.format(instant)).build(); - } - - public static AttributeValue toStringAttributeValue(OffsetDateTime accessor) { - return AttributeValue.builder().s(DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(accessor)).build(); - } - - public static AttributeValue toStringAttributeValue(ZonedDateTime accessor) { - return AttributeValue.builder().s(DateTimeFormatter.ISO_ZONED_DATE_TIME.format(accessor)).build(); - } - - public static Instant instantFromAttributeValue(EnhancedAttributeValue itemAttributeValue) { - return convert(itemAttributeValue, INSTANT_VISITOR); - } - - public static OffsetDateTime offsetDateTimeFromAttributeValue(EnhancedAttributeValue itemAttributeValue) { - return convert(itemAttributeValue, OFFSET_DATE_TIME_VISITOR); - } - - public static ZonedDateTime zonedDateTimeFromAttributeValue(EnhancedAttributeValue itemAttributeValue) { - return convert(itemAttributeValue, ZONED_DATE_TIME_VISITOR); - } - - private static T convert(EnhancedAttributeValue itemAttributeValue, TypeConvertingVisitor visitor) { - try { - return itemAttributeValue.convert(visitor); - } catch (DateTimeException e) { - throw new IllegalArgumentException(e); - } - } - - private static final class InstantVisitor extends BaseVisitor { - protected InstantVisitor() { - super(Instant.class, Instant::from); - } - - @Override - public Instant convertString(String value) { - try { - return super.convertString(value); - } catch (DateTimeParseException e) { - // Instant has a larger date range (-1,000,000,000 to 1,000,000,000) than zoned or offset date times - // (-999,999,999 to -999,999,999). An Instant was requested, so we try falling back to the ISO_INSTANT - // parser that supports Instant.MIN through Instant.MAX. 
- try { - return DateTimeFormatter.ISO_INSTANT.parse(value, Instant::from); - } catch (DateTimeParseException e2) { - // Nope, that didn't work either. Report the failures. - throw new IllegalArgumentException("Record could not be parsed with either " + - "DateTimeFormatter.ISO_ZONED_DATE_TIME (" + e.getMessage() + - ") or DateTimeFormatter.ISO_INSTANT (" + e2.getMessage() + - ")."); - } - } - } - - @Override - protected Instant fromUtcInstant(Instant instant) { - return instant; - } - } - - private static final class OffsetDateTimeVisitor extends BaseVisitor { - protected OffsetDateTimeVisitor() { - super(OffsetDateTime.class, OffsetDateTime::from); - } - - @Override - protected OffsetDateTime fromUtcInstant(Instant instant) { - return instant.atOffset(ZoneOffset.UTC); - } - } - - private static final class ZonedDateTimeVisitor extends BaseVisitor { - protected ZonedDateTimeVisitor() { - super(ZonedDateTime.class, ZonedDateTime::from); - } - - @Override - protected ZonedDateTime fromUtcInstant(Instant instant) { - return instant.atZone(ZoneOffset.UTC); - } - } - - private abstract static class BaseVisitor extends TypeConvertingVisitor { - private final TemporalQuery query; - - protected BaseVisitor(Class targetType, TemporalQuery query) { - super(targetType); - this.query = query; - } - - @Override - public T convertString(String value) { - return DateTimeFormatter.ISO_ZONED_DATE_TIME.parse(value, query); - } - - @Override - public T convertNumber(String value) { - String[] splitOnDecimal = splitNumberOnDecimal(value); - - Validate.isTrue(splitOnDecimal[1].length() <= 9, "Nanoseconds must be expressed in 9 or fewer digits."); - - long epochSecond = splitOnDecimal[0].length() == 0 ? 
0 : Long.parseLong(splitOnDecimal[0]); - int nanoAdjustment = Integer.parseInt(padRight(9, splitOnDecimal[1])); - - if (value.startsWith("-")) { - nanoAdjustment = -nanoAdjustment; - } - - return fromUtcInstant(Instant.ofEpochSecond(epochSecond, nanoAdjustment)); - } - - protected abstract T fromUtcInstant(Instant instant); - } -} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java index f17db0852ca3..fa58f8d5cb7d 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/AtomicBooleanAttributeConverter.java @@ -22,8 +22,6 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.AtomicBooleanStringConverter; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** @@ -43,8 +41,7 @@ @ThreadSafe @Immutable public final class AtomicBooleanAttributeConverter implements AttributeConverter { - private static final Visitor VISITOR = new Visitor(); - private static final AtomicBooleanStringConverter STRING_CONVERTER = AtomicBooleanStringConverter.create(); + private static final BooleanAttributeConverter BOOLEAN_CONVERTER = BooleanAttributeConverter.create(); private AtomicBooleanAttributeConverter() { } @@ -70,26 +67,6 @@ public AttributeValue 
transformFrom(AtomicBoolean input) { @Override public AtomicBoolean transformTo(AttributeValue input) { - if (input.bool() != null) { - return EnhancedAttributeValue.fromBoolean(input.bool()).convert(VISITOR); - } - - return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); - } - - private static final class Visitor extends TypeConvertingVisitor { - private Visitor() { - super(AtomicBoolean.class, AtomicBooleanAttributeConverter.class); - } - - @Override - public AtomicBoolean convertString(String value) { - return STRING_CONVERTER.fromString(value); - } - - @Override - public AtomicBoolean convertBoolean(Boolean value) { - return new AtomicBoolean(value); - } + return new AtomicBoolean(BOOLEAN_CONVERTER.transformTo(input)); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java index 83a2591eb399..e9e8a0a1ebe1 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/BooleanAttributeConverter.java @@ -92,6 +92,15 @@ public Boolean convertString(String value) { return STRING_CONVERTER.fromString(value); } + @Override + public Boolean convertNumber(String value) { + switch (value) { + case "0": return false; + case "1": return true; + default: throw new IllegalArgumentException("Number could not be converted to boolean: " + value); + } + } + @Override public Boolean convertBoolean(Boolean value) { return value; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java index 1404a2bffa54..18395a82656b 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/EnumAttributeConverter.java @@ -33,7 +33,7 @@ * This stores values in DynamoDB as a string. * *

- * This can be created via {@link #create(Class)}. + * This can be created via {@link #create(Class)}. */ @SdkInternalApi public class EnumAttributeConverter> implements AttributeConverter { diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsIntegerAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsIntegerAttributeConverter.java deleted file mode 100644 index 8e374ef48b87..000000000000 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsIntegerAttributeConverter.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; - -import java.time.Instant; -import software.amazon.awssdk.annotations.Immutable; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.annotations.ThreadSafe; -import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; -import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TimeConversion; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; - -/** - * A converter between {@link Instant} and {@link AttributeValue}. - * - *

- * This stores values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. - * - *

- * Instants are stored in the format "[-]X[.Y]", where X is the number of seconds past the epoch of 1970-01-01T00:00:00Z - * in this instant, and Y is the fraction of seconds, up to the nanosecond precision (Y is at most 9 characters long). - * - *

- * Examples: - *

    - *
  • {@code Instant.EPOCH.plusSeconds(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("1")}
  • - *
  • {@code Instant.EPOCH.minusSeconds(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("-1")}
  • - *
  • {@code Instant.EPOCH.plusMillis(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("0.001")}
  • - *
  • {@code Instant.EPOCH.minusMillis(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("-0.001")}
  • - *
  • {@code Instant.EPOCH.plusNanos(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("0.000000001")}
  • - *
  • {@code Instant.EPOCH.minusNanos(1)} is stored as {@code ItemAttributeValueMapper.fromNumber("-0.000000001")}
  • - *
- * - *

- * This converter can read any values written by itself, {@link InstantAsStringAttributeConverter}, - * {@link OffsetDateTimeAsStringAttributeConverter} or {@link ZonedDateTimeAsStringAttributeConverter}. Offset and zoned times - * will be automatically converted to the equivalent {@code Instant} based on the time zone information in the record (e.g. - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00+01:00")} will be converted to - * {@code Instant.EPOCH.minus(1, ChronoUnit.HOURS)}). - * - *

- * This can be created via {@link #create()}. - */ -@SdkInternalApi -@ThreadSafe -@Immutable -public final class InstantAsIntegerAttributeConverter implements AttributeConverter { - private InstantAsIntegerAttributeConverter() { - } - - public static InstantAsIntegerAttributeConverter create() { - return new InstantAsIntegerAttributeConverter(); - } - - @Override - public EnhancedType type() { - return EnhancedType.of(Instant.class); - } - - @Override - public AttributeValueType attributeValueType() { - return AttributeValueType.N; - } - - @Override - public AttributeValue transformFrom(Instant input) { - return TimeConversion.toIntegerAttributeValue(input); - } - - @Override - public Instant transformTo(AttributeValue input) { - if (input.n() != null) { - return TimeConversion.instantFromAttributeValue(EnhancedAttributeValue.fromNumber(input.n())); - } - return TimeConversion.instantFromAttributeValue(EnhancedAttributeValue.fromAttributeValue(input)); - } -} diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java index 74df38a11665..abd2332ffa63 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/InstantAsStringAttributeConverter.java @@ -22,15 +22,14 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TimeConversion; +import 
software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** * A converter between {@link Instant} and {@link AttributeValue}. * *

- * This stores values in DynamoDB as a string. If a number is desired (for sorting purposes), use - * {@link InstantAsIntegerAttributeConverter} instead. + * This stores values in DynamoDB as a string. * *

* Values are stored in ISO-8601 format, with nanosecond precision and a time zone of UTC. @@ -39,33 +38,36 @@ * Examples: *

    *
  • {@code Instant.EPOCH.plusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:01Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:01Z"} *
  • {@code Instant.EPOCH.minusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59Z"} *
  • {@code Instant.EPOCH.plusMillis(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00.001Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:00.001Z"} *
  • {@code Instant.EPOCH.minusMillis(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59.999Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59.999Z"} *
  • {@code Instant.EPOCH.plusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00.000000001Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:00.000000001Z"} *
  • {@code Instant.EPOCH.minusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59.999999999Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59.999999999Z"} *
- * + * See {@link Instant} for more details on the serialization format. *

- * This converter can read any values written by itself, {@link InstantAsIntegerAttributeConverter}, - * {@link OffsetDateTimeAsStringAttributeConverter} or {@link ZonedDateTimeAsStringAttributeConverter}. Offset and zoned times - * will be automatically converted to the equivalent {@code Instant} based on the time zone information in the record (e.g. - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00+01:00")} will be converted to - * {@code Instant.EPOCH.minus(1, ChronoUnit.HOURS)}). + * This converter can read any values written by itself, or values with zero offset written by + * {@link OffsetDateTimeAsStringAttributeConverter}, and values with zero offset and without time zone named written by + * {@link ZoneOffsetAttributeConverter}. Offset and zoned times will be automatically converted to the + * equivalent {@link Instant}. * *

+ * This serialization is lexicographically orderable when the year is not negative. + *

* This can be created via {@link #create()}. */ @SdkInternalApi @ThreadSafe @Immutable public final class InstantAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + private InstantAsStringAttributeConverter() { } @@ -85,11 +87,31 @@ public AttributeValueType attributeValueType() { @Override public AttributeValue transformFrom(Instant input) { - return TimeConversion.toStringAttributeValue(input); + return AttributeValue.builder().s(input.toString()).build(); } @Override public Instant transformTo(AttributeValue input) { - return TimeConversion.instantFromAttributeValue(EnhancedAttributeValue.fromAttributeValue(input)); + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(Instant.class, InstantAsStringAttributeConverter.class); + } + + @Override + public Instant convertString(String value) { + return Instant.parse(value); + } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java index e0fa10c98b28..40984c049334 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ListAttributeConverter.java @@ -45,28 +45,36 @@ * *

* A builder is exposed to allow defining how the collection and element types are created and converted: + *

* - * AttributeConverter> listConverter = + * {@literal AttributeConverter> listConverter = * CollectionAttributeConverter.builder(EnhancedType.listOf(Integer.class)) * .collectionConstructor(ArrayList::new) * .elementConverter(IntegerAttributeConverter.create()) - * .build() + * .build()} * * *

* For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: + *

* - * AttributeConverter> listConverter = - * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create()); + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create());} + * *

- * AttributeConverter> collectionConverer = - * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> collectionConverer = + * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create());} + * *

- * AttributeConverter> setConverter = - * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> setConverter = + * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create());} + * *

- * AttributeConverter> sortedSetConverter = - * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> sortedSetConverter = + * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create());} * * * @see MapAttributeConverter diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java index 49a5dc37381e..0966c933aeff 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateAttributeConverter.java @@ -24,33 +24,36 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** * A converter between {@link LocalDate} and {@link AttributeValue}. * *

- * This stores and reads values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. + * This stores and reads values in DynamoDB as a String. * *

- * LocalDateTimes are stored in the format "[-]YYYYMMDD000000", where: + * LocalDates are stored in the official {@link LocalDate} format "[-]YYYY-MM-DD", where: *

    *
  1. Y is a year between {@link Year#MIN_VALUE} and {@link Year#MAX_VALUE} (prefixed with - if it is negative)
  2. *
  3. M is a 2-character, zero-prefixed month between 01 and 12
  4. *
  5. D is a 2-character, zero-prefixed day between 01 and 31
  6. - *
  7. 0 is a 6-character padding allowing for support with {@link LocalDateTimeAttributeConverter}.
  8. *
+ * See {@link LocalDate} for more details on the serialization format. * *

- * This is format-compatible with the {@link LocalDateTimeAttributeConverter}, allowing values stored as {@link LocalDate} to be - * retrieved as {@link LocalDateTime}s and vice-versa. The time associated with a value stored as a {@link LocalDate} is the - * beginning of the day (midnight). + * This is unidirectional format-compatible with the {@link LocalDateTimeAttributeConverter}, allowing values + * stored as {@link LocalDate} to be retrieved as {@link LocalDateTime}s. * *

+ * This serialization is lexicographically orderable when the year is not negative. + *

+ * * Examples: *

    - *
  • {@code LocalDate.of(1988, 5, 21)} is stored as {@code ItemAttributeValueMapper.fromNumber("19880521000000")}
  • - *
  • {@code LocalDateTime.of(-1988, 5, 21)} is stored as {@code ItemAttributeValueMapper.fromNumber("-19880521000000")}
  • + *
  • {@code LocalDate.of(1988, 5, 21)} is stored as as an AttributeValue with the String "1988-05-21"}
  • + *
  • {@code LocalDate.of(0, 1, 1)} is stored as as an AttributeValue with the String "0000-01-01"}
  • *
* *

@@ -60,8 +63,7 @@ @ThreadSafe @Immutable public final class LocalDateAttributeConverter implements AttributeConverter { - private static final LocalDateTimeAttributeConverter LOCAL_DATE_TIME_ATTRIBUTE_CONVERTER = - LocalDateTimeAttributeConverter.create(); + private static final Visitor VISITOR = new Visitor(); private LocalDateAttributeConverter() { } @@ -77,16 +79,36 @@ public EnhancedType type() { @Override public AttributeValueType attributeValueType() { - return AttributeValueType.N; + return AttributeValueType.S; } @Override public AttributeValue transformFrom(LocalDate input) { - return LOCAL_DATE_TIME_ATTRIBUTE_CONVERTER.transformFrom(input.atStartOfDay()); + return AttributeValue.builder().s(input.toString()).build(); } @Override public LocalDate transformTo(AttributeValue input) { - return LOCAL_DATE_TIME_ATTRIBUTE_CONVERTER.transformTo(input).toLocalDate(); + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(LocalDate.class, InstantAsStringAttributeConverter.class); + } + + @Override + public LocalDate convertString(String value) { + return LocalDate.parse(value); + } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java index c226d73f1ee4..c33ab2e05578 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalDateTimeAttributeConverter.java @@ -15,9 +15,6 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft2; - import java.time.LocalDate; import java.time.LocalDateTime; import java.time.Year; @@ -35,10 +32,13 @@ * A converter between {@link LocalDateTime} and {@link AttributeValue}. * *

- * This stores and reads values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. + * This stores and reads values in DynamoDB as a string. + * + *

+ * Values are stored with nanosecond precision. * *

- * LocalDateTimes are stored in the format "[-]YYYYMMDDHHIISS[.NNNNNNNNN]", where: + * LocalDateTimes are stored in the official {@link LocalDateTime} format "[-]YYYY-MM-DDTHH:II:SS[.NNNNNNNNN]", where: *

    *
  1. Y is a year between {@link Year#MIN_VALUE} and {@link Year#MAX_VALUE} (prefixed with - if it is negative)
  2. *
  3. M is a 2-character, zero-prefixed month between 01 and 12
  4. @@ -49,27 +49,30 @@ *
  5. N is a 9-character, zero-prefixed nanosecond between 000,000,000 and 999,999,999. * The . and N may be excluded if N is 0.
  6. *
- * + * See {@link LocalDateTime} for more details on the serialization format. *

* This is format-compatible with the {@link LocalDateAttributeConverter}, allowing values stored as {@link LocalDate} to be - * retrieved as {@link LocalDateTime}s and vice-versa. The time associated with a value stored as a {@link LocalDate} is the + * retrieved as {@link LocalDateTime}s. The time associated with a value stored as a {@link LocalDate} is the * beginning of the day (midnight). * *

+ * This serialization is lexicographically orderable when the year is not negative. + *

+ * * Examples: *
    *
  • {@code LocalDateTime.of(1988, 5, 21, 0, 0, 0)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("19880521000000")}
  • + * an AttributeValue with the String "1988-05-21T00:00"} *
  • {@code LocalDateTime.of(-1988, 5, 21, 0, 0, 0)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("-19880521000000")}
  • + * an AttributeValue with the String "-1988-05-21T00:00"} *
  • {@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).plusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("19880521000001")}
  • + * an AttributeValue with the String "1988-05-21T00:00:01"} *
  • {@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).minusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("19880520235959")}
  • + * an AttributeValue with the String "1988-05-20T23:59:59"} *
  • {@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).plusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("19880521000000.0000000001")}
  • + * an AttributeValue with the String "1988-05-21T00:00:00.0000000001"} *
  • {@code LocalDateTime.of(1988, 5, 21, 0, 0, 0).minusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromNumber("19880520235959.999999999")}
  • + * an AttributeValue with the String "1988-05-20T23:59:59.999999999"} *
* *

@@ -92,49 +95,40 @@ public EnhancedType type() { @Override public AttributeValueType attributeValueType() { - return AttributeValueType.N; + return AttributeValueType.S; } @Override public AttributeValue transformFrom(LocalDateTime input) { - String value = "" + - input.getYear() + - padLeft2(input.getMonthValue()) + - padLeft2(input.getDayOfMonth()) + - padLeft2(input.getHour()) + - padLeft2(input.getMinute()) + - padLeft2(input.getSecond()) + - (input.getNano() == 0 ? "" : "." + padLeft(9, input.getNano())); - return AttributeValue.builder().n(value).build(); + return AttributeValue.builder().s(input.toString()).build(); } @Override public LocalDateTime transformTo(AttributeValue input) { - if (input.n() != null) { - return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); } - return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); } private static final class Visitor extends TypeConvertingVisitor { private Visitor() { - super(LocalDateTime.class, InstantAsIntegerAttributeConverter.class); + super(LocalDateTime.class, InstantAsStringAttributeConverter.class); } @Override - public LocalDateTime convertNumber(String value) { - String[] splitOnDecimal = ConverterUtils.splitNumberOnDecimal(value); - String[] chunkedDateTime = ConverterUtils.chunkWithLeftOverflow(splitOnDecimal[0], 2, 2, 2, 2, 2); - - int year = Integer.parseInt(chunkedDateTime[0]); - return LocalDateTime.of(year, - Integer.parseInt(chunkedDateTime[1]), - Integer.parseInt(chunkedDateTime[2]), - Integer.parseInt(chunkedDateTime[3]), - Integer.parseInt(chunkedDateTime[4]), - Integer.parseInt(chunkedDateTime[5]), - Integer.parseInt(splitOnDecimal[1])); + public LocalDateTime convertString(String value) { + if 
(value.contains("T")) { // AttributeValue.S in LocalDateTime format + return LocalDateTime.parse(value); + } else { // AttributeValue.S in LocalDate format + return ConverterUtils.convertFromLocalDate(LocalDate.parse(value)); + } } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java index 6678e5265160..4e52cefdf056 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/LocalTimeAttributeConverter.java @@ -15,9 +15,6 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft2; - import java.time.DateTimeException; import java.time.LocalTime; import software.amazon.awssdk.annotations.Immutable; @@ -26,7 +23,6 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -34,10 +30,10 @@ * A converter between {@link LocalTime} and {@link AttributeValue}. * *

- * This stores and reads values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. + * This stores and reads values in DynamoDB as a String. * *

- * LocalTimes are stored in the format "HHIISS[.NNNNNNNNN]", where: + * LocalTimes are stored in the official {@link LocalTime} format "HH:II:SS[.NNNNNNNNN]", where: *

    *
  1. H is a 2-character, zero-prefixed hour between 00 and 23
  2. *
  3. I is a 2-character, zero-prefixed minute between 00 and 59
  4. @@ -45,12 +41,16 @@ *
  5. N is a 9-character, zero-prefixed nanosecond between 000,000,000 and 999,999,999. * The . and N may be excluded if N is 0.
  6. *
+ * See {@link LocalTime} for more details on the serialization format. * *

+ * This serialization is lexicographically orderable. + *

+ * * Examples: *

    - *
  • {@code LocalTime.of(5, 30, 0)} is stored as {@code ItemAttributeValueMapper.fromNumber("053000")}
  • - *
  • {@code LocalDateTime.of(5, 30, 0, 1)} is stored as {@code ItemAttributeValueMapper.fromNumber("053000.000000001")}
  • + *
  • {@code LocalTime.of(5, 30, 0)} is stored as an AttributeValue with the String "05:30"}
  • + *
  • {@code LocalTime.of(5, 30, 0, 1)} is stored as an AttributeValue with the String "05:30:00.000000001"}
  • *
* *

@@ -76,23 +76,18 @@ public EnhancedType type() { @Override public AttributeValueType attributeValueType() { - return AttributeValueType.N; + return AttributeValueType.S; } @Override public AttributeValue transformFrom(LocalTime input) { - String value = "" + - padLeft2(input.getHour()) + - padLeft2(input.getMinute()) + - padLeft2(input.getSecond()) + - (input.getNano() == 0 ? "" : "." + padLeft(9, input.getNano())); - return AttributeValue.builder().n(value).build(); + return AttributeValue.builder().s(input.toString()).build(); } @Override public LocalTime transformTo(AttributeValue input) { - if (input.n() != null) { - return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); } return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); @@ -100,19 +95,13 @@ public LocalTime transformTo(AttributeValue input) { private static final class Visitor extends TypeConvertingVisitor { private Visitor() { - super(LocalTime.class, InstantAsIntegerAttributeConverter.class); + super(LocalTime.class, InstantAsStringAttributeConverter.class); } @Override - public LocalTime convertNumber(String value) { - String[] splitOnDecimal = ConverterUtils.splitNumberOnDecimal(value); - String[] chunkedTime = ConverterUtils.chunk(splitOnDecimal[0], 2, 2, 2); - + public LocalTime convertString(String value) { try { - return LocalTime.of(Integer.parseInt(chunkedTime[0]), - Integer.parseInt(chunkedTime[1]), - Integer.parseInt(chunkedTime[2]), - Integer.parseInt(splitOnDecimal[1])); + return LocalTime.parse(value); } catch (DateTimeException e) { throw new IllegalArgumentException(e); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java index 7d9a1b223629..54a683acbcd9 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MapAttributeConverter.java @@ -47,25 +47,28 @@ * *

* A builder is exposed to allow defining how the map, key and value types are created and converted: + *

* - * AttributeConverter> mapConverter = + * {@literal AttributeConverter> mapConverter = * MapAttributeConverter.builder(EnhancedType.mapOf(Integer.class, String.class)) * .mapConstructor(HashMap::new) * .keyConverter(MonthDayStringConverter.create()) * .valueConverter(StringAttributeConverter.create()) - * .build(); + * .build();} * * *

* For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: * - * AttributeConverter> mapConverter = + * {@literal AttributeConverter> mapConverter = * MapAttributeConverter.mapConverter(MonthDayStringConverter.create(), - * StringAttributeConverter.create()); + * StringAttributeConverter.create());} + * *

- * AttributeConverter> sortedMapConverter = + * + * {@literal AttributeConverter> sortedMapConverter = * MapAttributeConverter.sortedMapConverter(MonthDayStringConverter.create(), - * StringAttributeConverter.create()); + * StringAttributeConverter.create());} * * * @see MapAttributeConverter diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java index 4d85db6b6e2c..dc85eaf69bc0 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/MonthDayAttributeConverter.java @@ -15,9 +15,6 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; -import static software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils.padLeft2; - -import java.time.DateTimeException; import java.time.MonthDay; import software.amazon.awssdk.annotations.Immutable; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -25,29 +22,31 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterUtils; import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.utils.Validate; /** * A converter between {@link MonthDay} and {@link AttributeValue}. * *

- * This stores and reads values in DynamoDB as a number, so that they can be sorted numerically as part of a sort key. + * This stores and reads values in DynamoDB as a String. * *

- * LocalTimes are stored in the format "MMDD", where: + * MonthDays are stored in the official {@link MonthDay} format "--MM-DD", where: *

    *
  1. M is a 2-character, zero-prefixed month between 01 and 12
  2. *
  3. D is a 2-character, zero-prefixed day between 01 and 31
  4. *
+ * See {@link MonthDay} for more details on the serialization format. * *

+ * This serialization is lexicographically orderable. + *

+ * * Examples: *

    - *
  • {@code MonthDay.of(5, 21)} is stored as {@code ItemAttributeValueMapper.fromNumber("0521")}
  • - *
  • {@code MonthDay.of(12, 1)} is stored as {@code ItemAttributeValueMapper.fromNumber("1201")}
  • + *
• {@code MonthDay.of(5, 21)} is stored as an AttributeValue with the String "--05-21"}
  • + *
• {@code MonthDay.of(12, 1)} is stored as an AttributeValue with the String "--12-01"}
  • *
* *

@@ -73,24 +72,25 @@ public EnhancedType type() { @Override public AttributeValueType attributeValueType() { - return AttributeValueType.N; + return AttributeValueType.S; } @Override public AttributeValue transformFrom(MonthDay input) { - String value = "" + - padLeft2(input.getMonthValue()) + - padLeft2(input.getDayOfMonth()); - return AttributeValue.builder().n(value).build(); + return AttributeValue.builder().s(input.toString()).build(); } @Override public MonthDay transformTo(AttributeValue input) { - if (input.n() != null) { - return EnhancedAttributeValue.fromNumber(input.n()).convert(VISITOR); - } + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } - return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } } private static final class Visitor extends TypeConvertingVisitor { @@ -99,15 +99,8 @@ private Visitor() { } @Override - public MonthDay convertNumber(String value) { - Validate.isTrue(value.length() == 4, "Invalid Month/Day length: %s, expected 4 (MMDD)", value.length()); - String[] chunkedMonthDay = ConverterUtils.chunk(value, 2, 2); - try { - return MonthDay.of(Integer.parseInt(chunkedMonthDay[0]), - Integer.parseInt(chunkedMonthDay[1])); - } catch (DateTimeException e) { - throw new IllegalArgumentException(e); - } + public MonthDay convertString(String value) { + return MonthDay.parse(value); } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java index 6db1885d27f2..0fbddcc412be 100644 --- 
a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OffsetDateTimeAsStringAttributeConverter.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TimeConversion; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** @@ -40,35 +40,40 @@ * Examples: *

    *
  • {@code OffsetDateTime.MIN} is stored as - * {@code ItemAttributeValueMapper.fromString("-999999999-01-01T00:00:00+18:00")}
  • + * an AttributeValue with the String "-999999999-01-01T00:00+18:00"} *
  • {@code OffsetDateTime.MAX} is stored as - * {@code ItemAttributeValueMapper.fromString("+999999999-12-31T23:59:59.999999999-18:00")}
  • + * an AttributeValue with the String "+999999999-12-31T23:59:59.999999999-18:00"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:01Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:01Z"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusSeconds(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59Z"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusMillis(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00.001Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:00.001Z"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusMillis(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59.999Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59.999Z"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).plusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00.000000001Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00:00.000000001Z"} *
  • {@code Instant.EPOCH.atOffset(ZoneOffset.UTC).minusNanos(1)} is stored as - * {@code ItemAttributeValueMapper.fromString("1969-12-31T23:59:59.999999999Z")}
  • + * an AttributeValue with the String "1969-12-31T23:59:59.999999999Z"} *
- * + * See {@link OffsetDateTime} for more details on the serialization format. *

- * This converter can read any values written by itself, {@link InstantAsIntegerAttributeConverter}, - * {@link InstantAsStringAttributeConverter}, or {@link ZonedDateTimeAsStringAttributeConverter}. Values written by - * {@code Instant} converters are treated as if they are in the UTC time zone (and an offset of 0 seconds will be returned). + * This converter can read any values written by itself or {@link InstantAsStringAttributeConverter}, + * and values without a named time zone written by {@link ZonedDateTimeAsStringAttributeConverter}. + * Values written by {@code Instant} converters are treated as if they are in the UTC time zone + * (and an offset of 0 seconds will be returned). * *

+ * This serialization is lexicographically orderable when the year is not negative. + *

* This can be created via {@link #create()}. */ @SdkInternalApi @ThreadSafe @Immutable public final class OffsetDateTimeAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + public static OffsetDateTimeAsStringAttributeConverter create() { return new OffsetDateTimeAsStringAttributeConverter(); } @@ -85,11 +90,31 @@ public AttributeValueType attributeValueType() { @Override public AttributeValue transformFrom(OffsetDateTime input) { - return TimeConversion.toStringAttributeValue(input); + return AttributeValue.builder().s(input.toString()).build(); } @Override public OffsetDateTime transformTo(AttributeValue input) { - return TimeConversion.offsetDateTimeFromAttributeValue(EnhancedAttributeValue.fromAttributeValue(input)); + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(OffsetDateTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public OffsetDateTime convertString(String value) { + return OffsetDateTime.parse(value); + } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java index d33a4cfc1acf..da6550acfaec 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeConverter.java @@ -66,7 +66,7 @@ public AttributeValue transformFrom(Optional input) { @Override public Optional transformTo(AttributeValue input) { Optional result; - if (input.nul()) { + if (Boolean.TRUE.equals(input.nul())) { // This is safe - An Optional.empty() can be used for any Optional subtype. result = Optional.empty(); } else { diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java index 7bab93eca971..0346c59b5bf1 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/SetAttributeConverter.java @@ -50,27 +50,33 @@ *

* A builder is exposed to allow defining how the collection and element types are created and converted: * - * AttributeConverter> listConverter = + * {@literal AttributeConverter> listConverter = * CollectionAttributeConverter.builder(EnhancedType.listOf(Integer.class)) * .collectionConstructor(ArrayList::new) * .elementConverter(IntegerAttributeConverter.create()) - * .build() + * .build()} * * *

* For frequently-used types, static methods are exposed to reduce the amount of boilerplate involved in creation: * - * AttributeConverter> listConverter = - * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create()); + * {@literal AttributeConverter> listConverter = + * CollectionAttributeConverter.listConverter(IntegerAttributeConverter.create());} + * *

- * AttributeConverter> collectionConverer = - * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> collectionConverter = + * CollectionAttributeConverter.collectionConverter(IntegerAttributeConverter.create());} + *

- * AttributeConverter> setConverter = - * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> setConverter = + * CollectionAttributeConverter.setConverter(IntegerAttributeConverter.create());} + * *

- * AttributeConverter> sortedSetConverter = - * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create()); + * + * {@literal AttributeConverter> sortedSetConverter = + * CollectionAttributeConverter.sortedSetConverter(IntegerAttributeConverter.create());} * * * @see MapAttributeConverter diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java index cdbf2e8871e9..5f0a7a386c73 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/ZonedDateTimeAsStringAttributeConverter.java @@ -24,7 +24,7 @@ import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TimeConversion; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.TypeConvertingVisitor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** @@ -43,27 +43,31 @@ * Examples: *

    *
  • {@code Instant.EPOCH.atZone(ZoneId.of("Europe/Paris"))} is stored as - * {@code 1970-01-01T01:00:00+01:00[Europe/Paris]}
  • + * an AttributeValue with the String "1970-01-01T01:00+01:00[Europe/Paris]"} *
  • {@code OffsetDateTime.MIN.toZonedDateTime()} is stored as - * {@code ItemAttributeValueMapper.fromString("-999999999-01-01T00:00:00+18:00")}
  • + * an AttributeValue with the String "-999999999-01-01T00:00+18:00"} *
  • {@code OffsetDateTime.MAX.toZonedDateTime()} is stored as - * {@code ItemAttributeValueMapper.fromString("+999999999-12-31T23:59:59.999999999-18:00")}
  • + * an AttributeValue with the String "+999999999-12-31T23:59:59.999999999-18:00"} *
  • {@code Instant.EPOCH.atZone(ZoneOffset.UTC)} is stored as - * {@code ItemAttributeValueMapper.fromString("1970-01-01T00:00:00Z")}
  • + * an AttributeValue with the String "1970-01-01T00:00Z"} *
- * + * See {@link ZonedDateTime} for more details on the serialization format. *

- * This converter can read any values written by itself, {@link InstantAsIntegerAttributeConverter}, - * {@link InstantAsStringAttributeConverter}, or {@link OffsetDateTimeAsStringAttributeConverter}. Values written by + * This converter can read any values written by itself, {@link InstantAsStringAttributeConverter}, + * or {@link OffsetDateTimeAsStringAttributeConverter}. Values written by * {@code Instant} converters are treated as if they are in the UTC time zone. * *

+ * This serialization is lexicographically orderable when the year is not negative. + *

* This can be created via {@link #create()}. */ @SdkInternalApi @ThreadSafe @Immutable public final class ZonedDateTimeAsStringAttributeConverter implements AttributeConverter { + private static final Visitor VISITOR = new Visitor(); + public static ZonedDateTimeAsStringAttributeConverter create() { return new ZonedDateTimeAsStringAttributeConverter(); } @@ -80,11 +84,31 @@ public AttributeValueType attributeValueType() { @Override public AttributeValue transformFrom(ZonedDateTime input) { - return TimeConversion.toStringAttributeValue(input); + return AttributeValue.builder().s(input.toString()).build(); } @Override public ZonedDateTime transformTo(AttributeValue input) { - return TimeConversion.zonedDateTimeFromAttributeValue(EnhancedAttributeValue.fromAttributeValue(input)); + try { + if (input.s() != null) { + return EnhancedAttributeValue.fromString(input.s()).convert(VISITOR); + } + + return EnhancedAttributeValue.fromAttributeValue(input).convert(VISITOR); + } catch (RuntimeException e) { + throw new IllegalArgumentException(e); + } + + } + + private static final class Visitor extends TypeConvertingVisitor { + private Visitor() { + super(ZonedDateTime.class, InstantAsStringAttributeConverter.class); + } + + @Override + public ZonedDateTime convertString(String value) { + return ZonedDateTime.parse(value); + } } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java index a181161c8c4b..f89d9a22ae7e 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/extensions/DefaultDynamoDbExtensionContext.java @@ 
-19,19 +19,20 @@ import java.util.Objects; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; /** - * An SDK-internal implementation of {@link DynamoDbExtensionContext.BeforeWrite} and {@link DynamoDbExtensionContext.AfterRead}. + * An SDK-internal implementation of {@link DynamoDbExtensionContext.BeforeWrite} and + * {@link DynamoDbExtensionContext.AfterRead}. */ @SdkInternalApi public final class DefaultDynamoDbExtensionContext implements DynamoDbExtensionContext.BeforeWrite, DynamoDbExtensionContext.AfterRead { - private Map items; - private OperationContext operationContext; - private TableMetadata tableMetadata; + private final Map items; + private final OperationContext operationContext; + private final TableMetadata tableMetadata; private DefaultDynamoDbExtensionContext(Builder builder) { this.items = builder.items; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java index 65f70d769494..e57520d11645 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/BatchableWriteOperation.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import 
software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.model.WriteRequest; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java index 97e03583d600..df24b62a392e 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperation.java @@ -23,10 +23,12 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbIndex; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; + /** * Common interface for a single operation that can be executed in a synchronous or non-blocking asynchronous fashion * against a mapped database table. 
These operations can be made against either the primary index of a table or a diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java index e694531ce44b..9f562a4c3064 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperation.java @@ -27,6 +27,7 @@ import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.model.CreateTableEnhancedRequest; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContext.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java similarity index 74% rename from services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContext.java rename to services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java index dfca2dbef33b..254616f21f70 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContext.java +++ 
b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContext.java @@ -16,30 +16,33 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.operations; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; @SdkInternalApi -public class OperationContext { +public class DefaultOperationContext implements OperationContext { private final String tableName; private final String indexName; - private OperationContext(String tableName, String indexName) { + private DefaultOperationContext(String tableName, String indexName) { this.tableName = tableName; this.indexName = indexName; } - public static OperationContext create(String tableName, String indexName) { - return new OperationContext(tableName, indexName); + public static DefaultOperationContext create(String tableName, String indexName) { + return new DefaultOperationContext(tableName, indexName); } - public static OperationContext create(String tableName) { - return new OperationContext(tableName, TableMetadata.primaryIndexName()); + public static DefaultOperationContext create(String tableName) { + return new DefaultOperationContext(tableName, TableMetadata.primaryIndexName()); } + @Override public String tableName() { return tableName; } + @Override public String indexName() { return indexName; } @@ -53,7 +56,7 @@ public boolean equals(Object o) { return false; } - OperationContext that = (OperationContext) o; + DefaultOperationContext that = (DefaultOperationContext) o; if (tableName != null ? ! 
tableName.equals(that.tableName) : that.tableName != null) { return false; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java index 4603e9c4c96e..1fd5726b4284 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperation.java @@ -15,16 +15,19 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.operations; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.function.Function; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.Delete; import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; @@ -122,14 +125,16 @@ public TransactWriteItem generateTransactWriteItem(TableSchema tableSchema, private DeleteItemRequest.Builder addExpressionsIfExist(DeleteItemRequest.Builder requestBuilder) { if 
(this.request.conditionExpression() != null) { requestBuilder = requestBuilder.conditionExpression(this.request.conditionExpression().expression()); + Map expressionNames = this.request.conditionExpression().expressionNames(); + Map expressionValues = this.request.conditionExpression().expressionValues(); // Avoiding adding empty collections that the low level SDK will propagate to DynamoDb where it causes error. - if (!this.request.conditionExpression().expressionNames().isEmpty()) { - requestBuilder = requestBuilder.expressionAttributeNames(this.request.conditionExpression().expressionNames()); + if (expressionNames != null && !expressionNames.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeNames(expressionNames); } - if (!this.request.conditionExpression().expressionValues().isEmpty()) { - requestBuilder = requestBuilder.expressionAttributeValues(this.request.conditionExpression().expressionValues()); + if (expressionValues != null && !expressionValues.isEmpty()) { + requestBuilder = requestBuilder.expressionAttributeValues(expressionValues); } } return requestBuilder; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java index 1779197b14de..e04fb6bba453 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperation.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import 
software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java index 781d621d1598..8fa6e0b4eff2 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperation.java @@ -18,6 +18,7 @@ import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; @@ -56,7 +57,8 @@ default ResultT executeOnSecondaryIndex(TableSchema tableSchema, String indexName, DynamoDbEnhancedClientExtension extension, DynamoDbClient dynamoDbClient) { - OperationContext context = OperationContext.create(tableName, indexName); + OperationContext context = + DefaultOperationContext.create(tableName, indexName); return execute(tableSchema, context, extension, dynamoDbClient); } @@ -79,7 +81,8 @@ default CompletableFuture executeOnSecondaryIndexAsync(TableSchema executeOnSecondaryIndex(TableSchema tableSche String indexName, DynamoDbEnhancedClientExtension extension, DynamoDbClient dynamoDbClient) { - OperationContext context = OperationContext.create(tableName, indexName); + OperationContext context = 
DefaultOperationContext.create(tableName, indexName); return execute(tableSchema, context, extension, dynamoDbClient); } @@ -81,7 +82,7 @@ default SdkPublisher> executeOnSecondaryIndexAsync(TableSchema executeOnPrimaryIndex(TableSchema tableSchema DynamoDbEnhancedClientExtension extension, DynamoDbClient dynamoDbClient) { - OperationContext context = OperationContext.create(tableName, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); return execute(tableSchema, context, extension, dynamoDbClient); } @@ -78,7 +79,7 @@ default PagePublisher executeOnPrimaryIndexAsync(TableSchema table DynamoDbEnhancedClientExtension extension, DynamoDbAsyncClient dynamoDbAsyncClient) { - OperationContext context = OperationContext.create(tableName, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java index bfe5ae189bc3..3fa0bbbd5f2f 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperation.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import 
software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java index bd02191e02a0..5f05d1bfa726 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperation.java @@ -15,13 +15,20 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.operations; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.function.UnaryOperator; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.pagination.sync.SdkIterable; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; @@ -37,6 +44,8 @@ public class QueryOperation implements PaginatedTableOperation, PaginatedIndexOperation { + private static final UnaryOperator PROJECTION_EXPRESSION_KEY_MAPPER = k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private final QueryEnhancedRequest request; private QueryOperation(QueryEnhancedRequest 
request) { @@ -60,6 +69,19 @@ public QueryRequest generateRequest(TableSchema tableSchema, expressionNames = Expression.joinNames(expressionNames, this.request.filterExpression().expressionNames()); } + String projectionExpression = null; + if (this.request.attributesToProject() != null) { + List placeholders = new ArrayList<>(); + Map projectionPlaceholders = new HashMap<>(); + this.request.attributesToProject().forEach(attr -> { + String placeholder = PROJECTION_EXPRESSION_KEY_MAPPER.apply(attr); + placeholders.add(placeholder); + projectionPlaceholders.put(placeholder, attr); + }); + projectionExpression = String.join(",", placeholders); + expressionNames = Expression.joinNames(expressionNames, projectionPlaceholders); + } + QueryRequest.Builder queryRequest = QueryRequest.builder() .tableName(operationContext.tableName()) .keyConditionExpression(queryExpression.expression()) @@ -68,7 +90,8 @@ public QueryRequest generateRequest(TableSchema tableSchema, .scanIndexForward(this.request.scanIndexForward()) .limit(this.request.limit()) .exclusiveStartKey(this.request.exclusiveStartKey()) - .consistentRead(this.request.consistentRead()); + .consistentRead(this.request.consistentRead()) + .projectionExpression(projectionExpression); if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { queryRequest = queryRequest.indexName(operationContext.indexName()); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java index daa0901f646b..1b999eb5b8f6 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperation.java @@ -15,11 +15,20 @@ 
package software.amazon.awssdk.enhanced.dynamodb.internal.operations; +import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.cleanAttributeName; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.function.Function; +import java.util.function.UnaryOperator; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.pagination.sync.SdkIterable; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils; @@ -27,6 +36,7 @@ import software.amazon.awssdk.enhanced.dynamodb.model.ScanEnhancedRequest; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; @@ -34,6 +44,8 @@ public class ScanOperation implements PaginatedTableOperation, PaginatedIndexOperation { + private static final UnaryOperator PROJECTION_EXPRESSION_KEY_MAPPER = k -> "#AMZN_MAPPED_" + cleanAttributeName(k); + private final ScanEnhancedRequest request; private ScanOperation(ScanEnhancedRequest request) { @@ -48,20 +60,42 @@ public static ScanOperation create(ScanEnhancedRequest request) { public ScanRequest generateRequest(TableSchema tableSchema, OperationContext operationContext, DynamoDbEnhancedClientExtension extension) { + Map expressionValues = null; + Map expressionNames = null; + + if 
(this.request.filterExpression() != null) { + expressionValues = this.request.filterExpression().expressionValues(); + expressionNames = this.request.filterExpression().expressionNames(); + } + + String projectionExpression = null; + if (this.request.attributesToProject() != null) { + List placeholders = new ArrayList<>(); + Map projectionPlaceholders = new HashMap<>(); + this.request.attributesToProject().forEach(attr -> { + String placeholder = PROJECTION_EXPRESSION_KEY_MAPPER.apply(attr); + placeholders.add(placeholder); + projectionPlaceholders.put(placeholder, attr); + }); + projectionExpression = String.join(",", placeholders); + expressionNames = Expression.joinNames(expressionNames, projectionPlaceholders); + } + ScanRequest.Builder scanRequest = ScanRequest.builder() .tableName(operationContext.tableName()) .limit(this.request.limit()) .exclusiveStartKey(this.request.exclusiveStartKey()) - .consistentRead(this.request.consistentRead()); + .consistentRead(this.request.consistentRead()) + .expressionAttributeValues(expressionValues) + .expressionAttributeNames(expressionNames) + .projectionExpression(projectionExpression); if (!TableMetadata.primaryIndexName().equals(operationContext.indexName())) { scanRequest = scanRequest.indexName(operationContext.indexName()); } if (this.request.filterExpression() != null) { - scanRequest = scanRequest.filterExpression(this.request.filterExpression().expression()) - .expressionAttributeValues(this.request.filterExpression().expressionValues()) - .expressionAttributeNames(this.request.filterExpression().expressionNames()); + scanRequest = scanRequest.filterExpression(this.request.filterExpression().expression()); } return scanRequest.build(); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java index 
77bb6fec382c..f1f98121100c 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperation.java @@ -18,6 +18,7 @@ import java.util.concurrent.CompletableFuture; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; @@ -54,7 +55,7 @@ default ResultT executeOnPrimaryIndex(TableSchema tableSchema, String tableName, DynamoDbEnhancedClientExtension extension, DynamoDbClient dynamoDbClient) { - OperationContext context = OperationContext.create(tableName, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); return execute(tableSchema, context, extension, dynamoDbClient); } @@ -76,7 +77,7 @@ default CompletableFuture executeOnPrimaryIndexAsync(TableSchema DynamoDbEnhancedClientExtension extension, DynamoDbAsyncClient dynamoDbAsyncClient) { - OperationContext context = OperationContext.create(tableName, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(tableName, TableMetadata.primaryIndexName()); return executeAsync(tableSchema, context, extension, dynamoDbAsyncClient); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java index 9f75de00552f..45a01f773ed9 
100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactWriteItemsOperation.java @@ -43,6 +43,7 @@ public static TransactWriteItemsOperation create(TransactWriteItemsEnhancedReque public TransactWriteItemsRequest generateRequest(DynamoDbEnhancedClientExtension extension) { return TransactWriteItemsRequest.builder() .transactItems(this.request.transactWriteItems()) + .clientRequestToken(this.request.clientRequestToken()) .build(); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java index 87393ae6eb25..616e73fab2b1 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableReadOperation.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java index e117e929e01b..1f88e8044e00 100644 --- 
a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TransactableWriteOperation.java @@ -17,6 +17,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java index cc3ed8000379..489f7a4da493 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperation.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java index 
02f1ac85237e..e5cebdd8a233 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchema.java @@ -57,29 +57,30 @@ /** * Implementation of {@link TableSchema} that builds a table schema based on properties and annotations of a bean * class. Example: - * {@code - * @literal @DynamoDbBean + *

+ * 
+ * {@literal @}DynamoDbBean
  * public class CustomerAccount {
  *     private String unencryptedBillingKey;
  *
- *     @literal @DynamoDbPartitionKey
- *     @literal @DynamoDbSecondarySortKey(indexName = "accounts_by_customer")
+ *     {@literal @}DynamoDbPartitionKey
+ *     {@literal @}DynamoDbSecondarySortKey(indexName = "accounts_by_customer")
  *     public String accountId;
  *
- *     @literal @DynamoDbSortKey
- *     @literal @DynamoDbSecondaryPartitionKey(indexName = "accounts_by_customer")
+ *     {@literal @}DynamoDbSortKey
+ *     {@literal @}DynamoDbSecondaryPartitionKey(indexName = "accounts_by_customer")
  *     public String customerId;
  *
- *     @literal @DynamoDbAttribute("account_status")
+ *     {@literal @}DynamoDbAttribute("account_status")
  *     public CustomerAccountStatus status;
  *
- *     @literal @DynamoDbFlatten(dynamoDbBeanClass = Customer.class)
+ *     {@literal @}DynamoDbFlatten(dynamoDbBeanClass = Customer.class)
  *     public Customer customer;
  *
  *     public Instant createdOn;
  *
  *     // All public fields must be opted out to not participate in mapping
- *     @literal @DynamoDbIgnore
+ *     {@literal @}DynamoDbIgnore
  *     public String internalKey;
  *
  *     public enum CustomerAccountStatus {
@@ -87,14 +88,15 @@
  *         CLOSED
  *     }
  * }
- *
- * @literal @DynamoDbBean
+ * 
+ * {@literal @}DynamoDbBean
  * public class Customer {
  *     public String name;
  *
- *     public List address;
+ *     {@literal public List address;}
  * }
  * }
+ * 
* @param The type of object that this {@link TableSchema} maps to. */ @SdkPublicApi @@ -192,15 +194,14 @@ private static StaticTableSchema createStaticTableSchema(Class beanCla StaticTableSchema.Builder builder = StaticTableSchema.builder(beanClass) .newItemSupplier(newObjectSupplier); - Optional attributeConverterProvider = converterProviderAnnotation(dynamoDbBean); - attributeConverterProvider.ifPresent(builder::attributeConverterProvider); + builder.attributeConverterProviders(createConverterProvidersFromAnnotation(dynamoDbBean)); List> attributes = new ArrayList<>(); Arrays.stream(beanInfo.getPropertyDescriptors()) .filter(BeanTableSchema::isMappableProperty) .forEach(propertyDescriptor -> { - DynamoDbFlatten dynamoDbFlatten = propertyAnnotation(propertyDescriptor, DynamoDbFlatten.class); + DynamoDbFlatten dynamoDbFlatten = getPropertyAnnotation(propertyDescriptor, DynamoDbFlatten.class); if (dynamoDbFlatten != null) { builder.flatten(createStaticTableSchema(dynamoDbFlatten.dynamoDbBeanClass()), @@ -210,7 +211,8 @@ private static StaticTableSchema createStaticTableSchema(Class beanCla StaticAttribute.Builder attributeBuilder = staticAttributeBuilder(propertyDescriptor, beanClass); - Optional attributeConverter = attributeConverterAnnotation(propertyDescriptor); + Optional attributeConverter = + createAttributeConverterFromAnnotation(propertyDescriptor); attributeConverter.ifPresent(attributeBuilder::attributeConverter); addTagsToAttribute(attributeBuilder, propertyDescriptor); @@ -223,12 +225,12 @@ private static StaticTableSchema createStaticTableSchema(Class beanCla return builder.build(); } - private static Optional converterProviderAnnotation(DynamoDbBean dynamoDbBean) { - Class[] converterClasses = dynamoDbBean.converterProviders(); - //TODO: temporary solution to pick one AttributeConverterProvider. - return converterClasses.length > 0 ? 
- Optional.of((AttributeConverterProvider) newObjectSupplierForClass(converterClasses[0]).get()) : - Optional.empty(); + private static List createConverterProvidersFromAnnotation(DynamoDbBean dynamoDbBean) { + Class[] providerClasses = dynamoDbBean.converterProviders(); + + return Arrays.stream(providerClasses) + .map(c -> (AttributeConverterProvider) newObjectSupplierForClass(c).get()) + .collect(Collectors.toList()); } private static StaticAttribute.Builder staticAttributeBuilder(PropertyDescriptor propertyDescriptor, @@ -283,16 +285,19 @@ private static EnhancedType convertTypeToEnhancedType(Type type) { return EnhancedType.of(type); } - private static Optional attributeConverterAnnotation(PropertyDescriptor propertyDescriptor) { - DynamoDbConvertedBy attributeConverterBean = propertyAnnotation(propertyDescriptor, DynamoDbConvertedBy.class); + private static Optional createAttributeConverterFromAnnotation( + PropertyDescriptor propertyDescriptor) { + DynamoDbConvertedBy attributeConverterBean = + getPropertyAnnotation(propertyDescriptor, DynamoDbConvertedBy.class); Optional> optionalClass = Optional.ofNullable(attributeConverterBean) .map(DynamoDbConvertedBy::value); return optionalClass.map(clazz -> (AttributeConverter) newObjectSupplierForClass(clazz).get()); } /** - * This method scans all the annotations on a property and looks for a meta-annotation of {@link BeanTableSchemaAttributeTag}. - * If the meta-annotation is found, it attempts to create an annotation tag based on a standard named static method + * This method scans all the annotations on a property and looks for a meta-annotation of + * {@link BeanTableSchemaAttributeTag}. If the meta-annotation is found, it attempts to create + * an annotation tag based on a standard named static method * of the class that tag has been annotated with passing in the original property annotation as an argument. 
*/ private static void addTagsToAttribute(StaticAttribute.Builder attributeBuilder, @@ -359,7 +364,7 @@ private static BiConsumer setterForProperty(PropertyDescriptor prop } private static String attributeNameForProperty(PropertyDescriptor propertyDescriptor) { - DynamoDbAttribute dynamoDbAttribute = propertyAnnotation(propertyDescriptor, DynamoDbAttribute.class); + DynamoDbAttribute dynamoDbAttribute = getPropertyAnnotation(propertyDescriptor, DynamoDbAttribute.class); if (dynamoDbAttribute != null) { return dynamoDbAttribute.value(); } @@ -370,11 +375,11 @@ private static String attributeNameForProperty(PropertyDescriptor propertyDescri private static boolean isMappableProperty(PropertyDescriptor propertyDescriptor) { return propertyDescriptor.getReadMethod() != null && propertyDescriptor.getWriteMethod() != null - && propertyAnnotation(propertyDescriptor, DynamoDbIgnore.class) == null; + && getPropertyAnnotation(propertyDescriptor, DynamoDbIgnore.class) == null; } - private static R propertyAnnotation(PropertyDescriptor propertyDescriptor, - Class annotationType) { + private static R getPropertyAnnotation(PropertyDescriptor propertyDescriptor, + Class annotationType) { R getterAnnotation = propertyDescriptor.getReadMethod().getAnnotation(annotationType); R setterAnnotation = propertyDescriptor.getWriteMethod().getAnnotation(annotationType); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java index c9608dd881ba..951ddc29b49d 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttribute.java @@ -37,16 +37,16 @@ *

* The recommended way to use this class is by calling {@link StaticTableSchema.Builder#addAttribute(Class, Consumer)}. * Example: - * {@code + *

{@code
  * StaticTableSchema.builder()
  *                  .addAttribute(String.class,
  *                                a -> a.name("customer_name").getter(Customer::getName).setter(Customer::setName))
  *                  // ...
  *                  .build();
- * }
+ * }
*

* It's also possible to construct this class on its own using the static builder. Example: - * {@code + *

{@code
  * StaticAttribute customerNameAttribute =
  *     StaticAttribute.builder(Customer.class, String.class)
  *                    .name("customer_name")
@@ -54,6 +54,7 @@
  *                    .setter(Customer::setName)
  *                    .build();
  * }
+ * 
* @param the class of the item this attribute maps into. * @param the class that the value of this attribute converts to. */ @@ -146,7 +147,8 @@ public Builder toBuilder() { return new Builder(this.type).name(this.name) .getter(this.getter) .setter(this.setter) - .tags(this.tags); + .tags(this.tags) + .attributeConverter(this.attributeConverter); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java index 6a916ebbdcd7..df63eea259fb 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchema.java @@ -33,8 +33,10 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.ConverterProviderResolver; import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.ResolvedStaticAttribute; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -43,7 +45,7 @@ * get and set those attributes. This is the most direct, and thus fastest, implementation of {@link TableSchema}. *

* Example using a fictional 'Customer' data item class:- - * {@code + *

{@code
  * static final TableSchema CUSTOMER_TABLE_SCHEMA =
  *      StaticTableSchema.builder(Customer.class)
  *        .newItemSupplier(Customer::new)
@@ -65,13 +67,10 @@
  *                                           .tags(secondarySortKey("customers_by_date"),
  *                                                 secondarySortKey("customers_by_name")))
  *        .build();
- * }
+ * }
*/ @SdkPublicApi public final class StaticTableSchema implements TableSchema { - private static final AttributeConverterProvider DEFAULT_ATTRIBUTE_CONVERTER = - AttributeConverterProvider.defaultProvider(); - private final List> attributeMappers; private final Supplier newItemSupplier; private final Map> indexedMappers; @@ -82,9 +81,8 @@ public final class StaticTableSchema implements TableSchema { private StaticTableSchema(Builder builder) { StaticTableMetadata.Builder tableMetadataBuilder = StaticTableMetadata.builder(); - this.attributeConverterProvider = builder.attributeConverterProvider != null ? - builder.attributeConverterProvider : - DEFAULT_ATTRIBUTE_CONVERTER; + this.attributeConverterProvider = + ConverterProviderResolver.resolveProviders(builder.attributeConverterProviders); // Resolve declared attributes and find converters for them Stream> attributesStream = builder.attributes == null ? @@ -143,7 +141,8 @@ public static final class Builder { private List> attributes; private Supplier newItemSupplier; private List tags; - private AttributeConverterProvider attributeConverterProvider; + private List attributeConverterProviders = + Collections.singletonList(ConverterProviderResolver.defaultConverterProvider()); private Builder(Class itemClass) { this.itemClass = itemClass; @@ -282,18 +281,53 @@ public Builder addTag(StaticTableTag staticTableTag) { } /** - * A higher-precedence {@link AttributeConverterProvider} than the default one provided by the table schema. - * The {@link AttributeConverterProvider} must provide {@link AttributeConverter}s for all types used in the schema. + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. + *

+ * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters for most primitive + * and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *


+ * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * builder.attributeConverterProviders(customAttributeConverter, AttributeConverterProvider.defaultProvider()) + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema + */ + public Builder attributeConverterProviders(AttributeConverterProvider... attributeConverterProviders) { + this.attributeConverterProviders = Arrays.asList(attributeConverterProviders); + return this; + } + + /** + * Specifies the {@link AttributeConverterProvider}s to use with the table schema. + * The list of attribute converter providers must provide {@link AttributeConverter}s for all types used + * in the schema. The attribute converter providers will be loaded in the strict order they are supplied here. *

- * The table schema has a default, internal, AttributeConverterProvider which provides standard converters - for most primitive and common Java types. Use custom AttributeConverterProvider when you have specific - needs for type conversion that the defaults do not cover. + * Calling this method will override the default attribute converter provider + * {@link DefaultAttributeConverterProvider}, which provides standard converters + * for most primitive and common Java types, so that provider must be included in the supplied list if it is to be + * used. Providing an empty list here will cause no providers to get loaded. + *


+ * Adding one custom attribute converter provider and using the default as fallback: + * {@code + * List providers = new ArrayList<>( + * customAttributeConverter, + * AttributeConverterProvider.defaultProvider()); + * builder.attributeConverterProviders(providers); + * } + * + * @param attributeConverterProviders a list of attribute converter providers to use with the table schema */ - public Builder attributeConverterProvider(AttributeConverterProvider attributeConverterProvider) { - this.attributeConverterProvider = attributeConverterProvider; + public Builder attributeConverterProviders(List attributeConverterProviders) { + this.attributeConverterProviders = new ArrayList<>(attributeConverterProviders); return this; } + /** * Builds a {@link StaticTableSchema} based on the values this builder has been configured with */ diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java index a122c095795b..d5c554902bdb 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java @@ -22,6 +22,7 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; import software.amazon.awssdk.enhanced.dynamodb.mapper.BeanTableSchema; /** @@ -29,15 +30,32 @@ * a {@link BeanTableSchema} must have this annotation. If a class is used as a document within another DynamoDbBean, * it will also require this annotation. *

+ * Attribute Converter Providers
* Using {@link AttributeConverterProvider}s is optional and, if used, the supplied provider supersedes the default - * converter provided by the table schema. The converter must provide {@link AttributeConverter}s for all types used - * in the schema. The table schema default AttributeConverterProvider provides standard converters for most primitive - * and common Java types. Use custom AttributeConverterProviders when you have specific needs for type conversion - * that the defaults do not cover. + * converter provided by the table schema. + *

+ * Note: + *

    + *
  • The converter(s) must provide {@link AttributeConverter}s for all types used in the schema.
  • + *
  • The table schema DefaultAttributeConverterProvider provides standard converters for most primitive + * and common Java types. Use custom AttributeConverterProviders when you have specific needs for type conversion + * that the defaults do not cover.
  • + *
  • If you provide a list of attribute converter providers, you can add DefaultAttributeConverterProvider + * to the end of the list to fall back on the defaults.
  • + *
  • Providing an empty list {} will cause no providers to get loaded.
  • + *
+ * + * Example using attribute converter providers with one custom provider and the default provider: + *
+ * {@code
+ * (converterProviders = {CustomAttributeConverter.class, DefaultAttributeConverterProvider.class});
+ * }
+ * 
*/ @Target({ElementType.TYPE}) @Retention(RetentionPolicy.RUNTIME) @SdkPublicApi public @interface DynamoDbBean { - Class[] converterProviders() default {}; + Class[] converterProviders() + default { DefaultAttributeConverterProvider.class }; } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java index 6bc5877fb2dd..1a2668b5c660 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchGetResultPage.java @@ -25,7 +25,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.BatchGetItemResponse; @@ -69,7 +69,7 @@ public List resultsForTable(MappedTableResource mappedTable) { return results.stream() .map(itemMap -> readAndTransformSingleItem(itemMap, mappedTable.tableSchema(), - OperationContext.create(mappedTable.tableName()), + DefaultOperationContext.create(mappedTable.tableName()), dynamoDbEnhancedClientExtension)) .collect(Collectors.toList()); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java index 68278707b69d..118fdf4733a4 
100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/BatchWriteResult.java @@ -24,10 +24,10 @@ import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; -import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; import software.amazon.awssdk.enhanced.dynamodb.Key; +import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.model.DeleteRequest; import software.amazon.awssdk.services.dynamodb.model.PutRequest; import software.amazon.awssdk.services.dynamodb.model.WriteRequest; @@ -37,9 +37,9 @@ * {@link DynamoDbEnhancedClient#batchWriteItem(BatchWriteItemEnhancedRequest)}. The result describes any unprocessed items * after the operation completes. *
    - *
  • Use the {@link #unprocessedPutItemsForTable(DynamoDbTable)} method once for each table present in the request + *
  • Use the {@link #unprocessedPutItemsForTable(MappedTableResource)} method once for each table present in the request * to get any unprocessed items from a put action on that table.
  • - *
  • Use the {@link #unprocessedDeleteItemsForTable(DynamoDbTable)} method once for each table present in the request + *
  • Use the {@link #unprocessedDeleteItemsForTable(MappedTableResource)} method once for each table present in the request * to get any unprocessed items from a delete action on that table.
  • *
* @@ -67,7 +67,7 @@ public static Builder builder() { * @param the type of the table items * @return a list of items */ - public List unprocessedPutItemsForTable(DynamoDbTable mappedTable) { + public List unprocessedPutItemsForTable(MappedTableResource mappedTable) { List writeRequests = unprocessedRequests.getOrDefault(mappedTable.tableName(), Collections.emptyList()); @@ -78,7 +78,7 @@ public List unprocessedPutItemsForTable(DynamoDbTable mappedTable) { .map(PutRequest::item) .map(item -> readAndTransformSingleItem(item, mappedTable.tableSchema(), - OperationContext.create(mappedTable.tableName()), + DefaultOperationContext.create(mappedTable.tableName()), mappedTable.mapperExtension())) .collect(Collectors.toList()); } @@ -90,7 +90,7 @@ public List unprocessedPutItemsForTable(DynamoDbTable mappedTable) { * @param mappedTable the table to retrieve unprocessed items for. * @return a list of keys that were not processed as part of the batch request. */ - public List unprocessedDeleteItemsForTable(DynamoDbTable mappedTable) { + public List unprocessedDeleteItemsForTable(MappedTableResource mappedTable) { List writeRequests = unprocessedRequests.getOrDefault(mappedTable.tableName(), Collections.emptyList()); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java index 25d50c572644..879d31bea2c5 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ConditionCheck.java @@ -21,8 +21,8 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Expression; import software.amazon.awssdk.enhanced.dynamodb.Key; +import 
software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableWriteOperation; import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java index 297caba8c736..ffa7b8519526 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PageIterable.java @@ -45,7 +45,7 @@ * PageIterable results = table.scan(); * results.items().stream().forEach(item -> System.out.println(item)); * } - * + *
* @param The modelled type of the object in a page. */ @SdkPublicApi diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java index 0135cd1bcf87..a8b1e4208ca4 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/PutItemEnhancedRequest.java @@ -114,7 +114,7 @@ public Builder item(T item) { /** * Defines a logical expression on an item's attribute values which, if evaluating to true, - * will allow the delete operation to succeed. If evaluating to false, the operation will not succeed. + * will allow the put operation to succeed. If evaluating to false, the operation will not succeed. *

* See {@link Expression} for condition syntax and examples. * diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java index ac774c88acfe..a877244406a5 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryConditional.java @@ -31,11 +31,11 @@ * any specific table or schema and can be re-used in different contexts. *

* Example: - * + *

  * {@code
  * QueryConditional sortValueGreaterThanFour = QueryConditional.sortGreaterThan(k -> k.partitionValue(10).sortValue(4));
  * }
- * 
+ * 
*/ @SdkPublicApi public interface QueryConditional { diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java index b90745a69eba..f944e770639c 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequest.java @@ -15,7 +15,12 @@ package software.amazon.awssdk.enhanced.dynamodb.model; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbAsyncIndex; @@ -41,6 +46,7 @@ public final class QueryEnhancedRequest { private final Integer limit; private final Boolean consistentRead; private final Expression filterExpression; + private final List attributesToProject; private QueryEnhancedRequest(Builder builder) { this.queryConditional = builder.queryConditional; @@ -49,6 +55,9 @@ private QueryEnhancedRequest(Builder builder) { this.limit = builder.limit; this.consistentRead = builder.consistentRead; this.filterExpression = builder.filterExpression; + this.attributesToProject = builder.attributesToProject != null + ? 
Collections.unmodifiableList(builder.attributesToProject) + : null; } /** @@ -67,7 +76,8 @@ public Builder toBuilder() { .scanIndexForward(scanIndexForward) .limit(limit) .consistentRead(consistentRead) - .filterExpression(filterExpression); + .filterExpression(filterExpression) + .attributesToProject(attributesToProject); } /** @@ -113,6 +123,13 @@ public Expression filterExpression() { return filterExpression; } + /** + * Returns the list of projected attributes on this request object, or an null if no projection is specified. + */ + public List attributesToProject() { + return attributesToProject; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -142,6 +159,12 @@ public boolean equals(Object o) { if (consistentRead != null ? ! consistentRead.equals(query.consistentRead) : query.consistentRead != null) { return false; } + if (attributesToProject != null + ? ! attributesToProject.equals(query.attributesToProject) + : query.attributesToProject != null + ) { + return false; + } return filterExpression != null ? filterExpression.equals(query.filterExpression) : query.filterExpression == null; } @@ -153,6 +176,7 @@ public int hashCode() { result = 31 * result + (limit != null ? limit.hashCode() : 0); result = 31 * result + (consistentRead != null ? consistentRead.hashCode() : 0); result = 31 * result + (filterExpression != null ? filterExpression.hashCode() : 0); + result = 31 * result + (attributesToProject != null ? attributesToProject.hashCode() : 0); return result; } @@ -168,6 +192,7 @@ public static final class Builder { private Integer limit; private Boolean consistentRead; private Expression filterExpression; + private List attributesToProject; private Builder() { } @@ -255,6 +280,73 @@ public Builder filterExpression(Expression filterExpression) { return this; } + /** + *

+ * Sets a collection of the attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributesToProject + * A collection of the attributes names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(Collection attributesToProject) { + this.attributesToProject = attributesToProject != null ? new ArrayList<>(attributesToProject) : null; + return this; + } + + /** + *

+ * Sets one or more attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributesToProject + * One or more attributes names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(String... attributesToProject) { + return attributesToProject(Arrays.asList(attributesToProject)); + } + + /** + *

+ * Adds a single attribute name to be retrieved from the database. This attribute can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributeToProject + * An additional single attribute name to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addAttributeToProject(String attributeToProject) { + if (attributesToProject == null) { + attributesToProject = new ArrayList<>(); + } + attributesToProject.add(attributeToProject); + return this; + } + public QueryEnhancedRequest build() { return new QueryEnhancedRequest(this); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java index 02e91c7728c9..1ec797d6512d 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequest.java @@ -15,7 +15,12 @@ package software.amazon.awssdk.enhanced.dynamodb.model; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; @@ -35,12 +40,16 @@ public final class ScanEnhancedRequest { private final Integer limit; private final Boolean consistentRead; private final Expression filterExpression; + private final List attributesToProject; private ScanEnhancedRequest(Builder builder) { this.exclusiveStartKey = builder.exclusiveStartKey; this.limit = builder.limit; this.consistentRead = builder.consistentRead; this.filterExpression = builder.filterExpression; + this.attributesToProject = builder.attributesToProject != null + ? 
Collections.unmodifiableList(builder.attributesToProject) + : null; } /** @@ -57,7 +66,8 @@ public Builder toBuilder() { return builder().exclusiveStartKey(exclusiveStartKey) .limit(limit) .consistentRead(consistentRead) - .filterExpression(filterExpression); + .filterExpression(filterExpression) + .attributesToProject(attributesToProject); } /** @@ -88,6 +98,13 @@ public Expression filterExpression() { return filterExpression; } + /** + * Returns the list of projected attributes on this request object, or null if no projection is specified. + */ + public List attributesToProject() { + return attributesToProject; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -109,6 +126,12 @@ public boolean equals(Object o) { if (consistentRead != null ? ! consistentRead.equals(scan.consistentRead) : scan.consistentRead != null) { return false; } + if (attributesToProject != null + ? ! attributesToProject.equals(scan.attributesToProject) + : scan.attributesToProject != null + ) { + return false; + } return filterExpression != null ? filterExpression.equals(scan.filterExpression) : scan.filterExpression == null; } @@ -118,6 +141,7 @@ public int hashCode() { result = 31 * result + (limit != null ? limit.hashCode() : 0); result = 31 * result + (consistentRead != null ? consistentRead.hashCode() : 0); result = 31 * result + (filterExpression != null ? filterExpression.hashCode() : 0); + result = 31 * result + (attributesToProject != null ? attributesToProject.hashCode() : 0); return result; } @@ -129,6 +153,7 @@ public static final class Builder { private Integer limit; private Boolean consistentRead; private Expression filterExpression; + private List attributesToProject; private Builder() { } @@ -192,6 +217,73 @@ public Builder filterExpression(Expression filterExpression) { return this; } + /** + *

+ * Sets a collection of the attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributesToProject + * A collection of the attribute names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(Collection attributesToProject) { + this.attributesToProject = attributesToProject != null ? new ArrayList<>(attributesToProject) : null; + return this; + } + + /** + *

+ * Sets one or more attribute names to be retrieved from the database. These attributes can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes + * are not found, they will not appear in the result. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributesToProject + * One or more attribute names to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder attributesToProject(String... attributesToProject) { + return attributesToProject(Arrays.asList(attributesToProject)); + } + + /** + *

+ * Adds a single attribute name to be retrieved from the database. This attribute can include + * scalars, sets, or elements of a JSON document. + *

+ *

+ * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide. + *

+ * @param attributeToProject + * An additional single attribute name to be retrieved from the database. + * @return Returns a reference to this object so that method calls can be chained together. + */ + public Builder addAttributeToProject(String attributeToProject) { + if (attributesToProject == null) { + attributesToProject = new ArrayList<>(); + } + attributesToProject.add(attributeToProject); + return this; + } + public ScanEnhancedRequest build() { return new ScanEnhancedRequest(this); } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java index b26ce89b5eea..a8e2f22bbde2 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactGetItemsEnhancedRequest.java @@ -24,8 +24,8 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.GetItemOperation; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableReadOperation; import software.amazon.awssdk.services.dynamodb.model.TransactGetItem; @@ -137,7 +137,7 @@ public TransactGetItemsEnhancedRequest build() { private TransactGetItem generateTransactWriteItem(MappedTableResource mappedTableResource, TransactableReadOperation generator) { return 
generator.generateTransactGetItem(mappedTableResource.tableSchema(), - OperationContext.create(mappedTableResource.tableName()), + DefaultOperationContext.create(mappedTableResource.tableName()), mappedTableResource.mapperExtension()); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java index 327bf17cd18e..55298789c04e 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequest.java @@ -26,8 +26,8 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactableWriteOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.UpdateItemOperation; @@ -52,8 +52,11 @@ public final class TransactWriteItemsEnhancedRequest { private final List transactWriteItems; + private final String clientRequestToken; + private TransactWriteItemsEnhancedRequest(Builder builder) { this.transactWriteItems = getItemsFromSupplier(builder.itemSupplierList); + this.clientRequestToken = builder.clientRequestToken; } /** @@ -63,6 +66,26 @@ public static Builder builder() { 
return new Builder(); } + /** + *

+ * Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning + * that multiple identical calls have the same effect as one single call. + *

+ *

+ * A client request token is valid for 10 minutes after the first request that uses it is completed. After 10 + * minutes, any request with the same client token is treated as a new request. Do not resubmit the same request + * with the same client token for more than 10 minutes, or the result might not be idempotent. + *

+ *

+ * If you submit a request with the same client token but a change in other parameters within the 10-minute + * idempotency window, DynamoDB returns an IdempotentParameterMismatch exception. + *

+ */ + + public String clientRequestToken() { + return clientRequestToken; + } + /** * Returns the list of {@link TransactWriteItem} that represents all actions in the request. */ @@ -97,6 +120,8 @@ public int hashCode() { public static final class Builder { private List> itemSupplierList = new ArrayList<>(); + private String clientRequestToken; + private Builder() { } @@ -250,6 +275,17 @@ public Builder addUpdateItem(MappedTableResource mappedTableResource, T i .build()); } + /** + * Sets the clientRequestToken in this builder. + * + * @param clientRequestToken the clientRequestToken going to be used for build + * @return a builder of this type + */ + public Builder clientRequestToken(String clientRequestToken) { + this.clientRequestToken = clientRequestToken; + return this; + } + /** * Builds a {@link TransactWriteItemsEnhancedRequest} from the values stored in this builder. */ @@ -260,7 +296,7 @@ public TransactWriteItemsEnhancedRequest build() { private TransactWriteItem generateTransactWriteItem(MappedTableResource mappedTableResource, TransactableWriteOperation generator) { return generator.generateTransactWriteItem(mappedTableResource.tableSchema(), - OperationContext.create(mappedTableResource.tableName()), + DefaultOperationContext.create(mappedTableResource.tableName()), mappedTableResource.mapperExtension()); } } diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java index 64a380033c30..c375673bafe1 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/UpdateItemEnhancedRequest.java @@ -136,7 +136,7 @@ public Builder ignoreNulls(Boolean ignoreNulls) { /** * 
Defines a logical expression on an item's attribute values which, if evaluating to true, - * will allow the delete operation to succeed. If evaluating to false, the operation will not succeed. + * will allow the update operation to succeed. If evaluating to false, the operation will not succeed. *

* See {@link Expression} for condition syntax and examples. * diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java index 7c747d040c80..4dc8b95b6c75 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/model/WriteBatch.java @@ -27,8 +27,8 @@ import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.MappedTableResource; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.BatchableWriteOperation; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.PutItemOperation; import software.amazon.awssdk.services.dynamodb.model.WriteRequest; @@ -241,7 +241,7 @@ public WriteBatch build() { private WriteRequest generateWriteRequest(Supplier> mappedTableResourceSupplier, BatchableWriteOperation operation) { return operation.generateWriteRequest(mappedTableResourceSupplier.get().tableSchema(), - OperationContext.create(mappedTableResourceSupplier.get().tableName()), + DefaultOperationContext.create(mappedTableResourceSupplier.get().tableName()), mappedTableResourceSupplier.get().mapperExtension()); } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java index ddfb891df106..c78ae744a416 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/EnhancedTypeTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.assertThatCode; import java.util.Collection; import java.util.Deque; @@ -30,6 +31,8 @@ import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.mapper.StaticTableSchema; + public class EnhancedTypeTest { @Test public void anonymousCreationCapturesComplexTypeArguments() { @@ -207,6 +210,13 @@ public void navigableMapOf_ReturnsRawClassOfNavigableMap_WhenSpecifyingEnhancedT assertThat(type.rawClass()).isEqualTo(NavigableMap.class); assertThat(type.rawClassParameters()).containsExactly(EnhancedType.of(String.class), EnhancedType.of(Integer.class)); } + + @Test + public void documentOf_toString_doesNotRaiseNPE() { + TableSchema tableSchema = StaticTableSchema.builder(String.class).build(); + EnhancedType type = EnhancedType.documentOf(String.class, tableSchema); + assertThatCode(() -> type.toString()).doesNotThrowAnyException(); + } public class InnerType { } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java index 4582f1454bc4..75c6d2a953e6 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/BooleanAttributeConvertersTest.java @@ -40,6 +40,8 @@ public void 
atomicBooleanAttributeConverterBehaves() { assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0").toAttributeValue())); assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1").toAttributeValue())); assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("").toAttributeValue())); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0").toAttributeValue())).isFalse(); assertThat(transformTo(converter, EnhancedAttributeValue.fromString("true").toAttributeValue())).isTrue(); assertThat(transformTo(converter, EnhancedAttributeValue.fromString("false").toAttributeValue())).isFalse(); assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(true).toAttributeValue())).isTrue(); @@ -58,6 +60,8 @@ public void booleanAttributeConverterBehaves() { assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0").toAttributeValue())); assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1").toAttributeValue())); assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("").toAttributeValue())); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1").toAttributeValue())).isTrue(); + assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0").toAttributeValue())).isFalse(); assertThat(transformTo(converter, EnhancedAttributeValue.fromString("true").toAttributeValue())).isTrue(); assertThat(transformTo(converter, EnhancedAttributeValue.fromString("false").toAttributeValue())).isFalse(); assertThat(transformTo(converter, EnhancedAttributeValue.fromBoolean(true).toAttributeValue())).isTrue(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/GlobalDateTimeAttributeConvertersTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/GlobalDateTimeAttributeConvertersTest.java deleted file mode 100644 index da0eee8e512c..000000000000 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/GlobalDateTimeAttributeConvertersTest.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; - -import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; - -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.temporal.ChronoUnit; -import org.junit.Test; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsIntegerAttributeConverter; -import 
software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OffsetDateTimeAsStringAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZonedDateTimeAsStringAttributeConverter; - -public class GlobalDateTimeAttributeConvertersTest { - @Test - public void instantAsIntegerAttributeConverterBehaves() { - InstantAsIntegerAttributeConverter converter = InstantAsIntegerAttributeConverter.create(); - - assertThat(transformFrom(converter, Instant.MIN).n()).isEqualTo("-31557014167219200"); - assertThat(transformFrom(converter, Instant.EPOCH.minusMillis(1)).n()).isEqualTo("-0.001"); - assertThat(transformFrom(converter, Instant.EPOCH).n()).isEqualTo("0"); - assertThat(transformFrom(converter, Instant.EPOCH.plusMillis(1)).n()).isEqualTo("0.001"); - assertThat(transformFrom(converter, Instant.MAX).n()).isEqualTo("31556889864403199.999999999"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-31557014167219201"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("0.0000000000"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("31556889864403200"))); - - // InstantAsIntegerAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-31557014167219200"))).isEqualTo(Instant.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-0.001"))).isEqualTo(Instant.EPOCH.minusMillis(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber(".0"))).isEqualTo(Instant.EPOCH); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0."))).isEqualTo(Instant.EPOCH); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(Instant.EPOCH); - assertThat(transformTo(converter, 
EnhancedAttributeValue.fromNumber("0.000"))).isEqualTo(Instant.EPOCH); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0.001"))).isEqualTo(Instant.EPOCH.plusMillis(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("31556889864403199.999999999"))).isEqualTo(Instant.MAX); - - // InstantAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(Instant.EPOCH); - - // OffsetDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) - .isEqualTo(Instant.EPOCH.minus(1, ChronoUnit.HOURS)); - - // ZonedDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T01:00:00+01:00[Europe/Paris]"))) - .isEqualTo(Instant.EPOCH); - } - - @Test - public void instantAsStringAttributeConverterBehaves() { - InstantAsStringAttributeConverter converter = InstantAsStringAttributeConverter.create(); - - assertThat(transformFrom(converter, Instant.MIN).s()).isEqualTo("-1000000000-01-01T00:00:00Z"); - assertThat(transformFrom(converter, Instant.EPOCH.minusMillis(1)).s()).isEqualTo("1969-12-31T23:59:59.999Z"); - assertThat(transformFrom(converter, Instant.EPOCH).s()).isEqualTo("1970-01-01T00:00:00Z"); - assertThat(transformFrom(converter, Instant.EPOCH.plusMillis(1)).s()).isEqualTo("1970-01-01T00:00:00.001Z"); - assertThat(transformFrom(converter, Instant.MAX).s()).isEqualTo("+1000000000-12-31T23:59:59.999999999Z"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z"))); - - // InstantAsIntegerAttributeConverter format - assertThat(transformTo(converter, 
EnhancedAttributeValue.fromNumber("0"))).isEqualTo(Instant.EPOCH); - - // InstantAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-1000000000-01-01T00:00:00Z"))).isEqualTo(Instant.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1969-12-31T23:59:59.999Z"))).isEqualTo(Instant.EPOCH.minusMillis(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(Instant.EPOCH); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00.001Z"))).isEqualTo(Instant.EPOCH.plusMillis(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("+1000000000-12-31T23:59:59.999999999Z"))).isEqualTo(Instant.MAX); - - // OffsetDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) - .isEqualTo(Instant.EPOCH.minus(1, ChronoUnit.HOURS)); - - // ZonedDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T01:00:00+01:00[Europe/Paris]"))) - .isEqualTo(Instant.EPOCH); - } - - @Test - public void offsetDateTimeAsStringAttributeConverterBehaves() { - OffsetDateTimeAsStringAttributeConverter converter = OffsetDateTimeAsStringAttributeConverter.create(); - - OffsetDateTime epochUtc = Instant.EPOCH.atOffset(ZoneOffset.UTC); - - assertThat(transformFrom(converter, OffsetDateTime.MIN).s()).isEqualTo("-999999999-01-01T00:00:00+18:00"); - assertThat(transformFrom(converter, epochUtc.minusNanos(1)).s()).isEqualTo("1969-12-31T23:59:59.999999999Z"); - assertThat(transformFrom(converter, epochUtc).s()).isEqualTo("1970-01-01T00:00:00Z"); - assertThat(transformFrom(converter, epochUtc.plusNanos(1)).s()).isEqualTo("1970-01-01T00:00:00.000000001Z"); - assertThat(transformFrom(converter, OffsetDateTime.MAX).s()).isEqualTo("+999999999-12-31T23:59:59.999999999-18:00"); - - 
assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z"))); - - // InstantAsIntegerAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(epochUtc); - - // InstantAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); - - // OffsetDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) - .isEqualTo(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.ofHours(1))); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-999999999-01-01T00:00:00+18:00"))) - .isEqualTo(OffsetDateTime.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1969-12-31T23:59:59.999999999Z"))) - .isEqualTo(epochUtc.minusNanos(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))) - .isEqualTo(epochUtc); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00.000000001Z"))) - .isEqualTo(epochUtc.plusNanos(1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("+999999999-12-31T23:59:59.999999999-18:00"))) - .isEqualTo(OffsetDateTime.MAX); - - // ZonedDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T01:00:00+01:00[Europe/Paris]"))) - .isEqualTo(Instant.EPOCH.atOffset(ZoneOffset.ofHours(1))); - } - - @Test - public void zonedDateTimeAsStringAttributeConverterBehaves() { - ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create(); - - ZonedDateTime 
epochUtc = Instant.EPOCH.atZone(ZoneOffset.UTC); - ZonedDateTime min = OffsetDateTime.MIN.toZonedDateTime(); - ZonedDateTime max = OffsetDateTime.MAX.toZonedDateTime(); - - assertThat(transformFrom(converter, min).s()).isEqualTo("-999999999-01-01T00:00:00+18:00"); - assertThat(transformFrom(converter, epochUtc.minusNanos(1)).s()).isEqualTo("1969-12-31T23:59:59.999999999Z"); - assertThat(transformFrom(converter, epochUtc).s()).isEqualTo("1970-01-01T00:00:00Z"); - assertThat(transformFrom(converter, Instant.EPOCH.atZone(ZoneId.of("Europe/Paris"))).s()) - .isEqualTo("1970-01-01T01:00:00+01:00[Europe/Paris]"); - assertThat(transformFrom(converter, epochUtc.plusNanos(1)).s()).isEqualTo("1970-01-01T00:00:00.000000001Z"); - assertThat(transformFrom(converter, max).s()).isEqualTo("+999999999-12-31T23:59:59.999999999-18:00"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z"))); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00[FakeZone]"))); - - // InstantAsIntegerAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0"))).isEqualTo(epochUtc); - - // InstantAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); - - // OffsetDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) - .isEqualTo(epochUtc.minus(1, ChronoUnit.HOURS)); - - // ZonedDateTimeAsStringAttributeConverter format - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("-999999999-01-01T00:00:00+18:00"))) - .isEqualTo(min); - 
assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T01:00:00+01:00[Europe/Paris]"))) - .isEqualTo(Instant.EPOCH.atZone(ZoneId.of("Europe/Paris"))); - assertThat(transformTo(converter, EnhancedAttributeValue.fromString("+999999999-12-31T23:59:59.999999999-18:00"))) - .isEqualTo(max); - } -} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java new file mode 100644 index 000000000000..7abccac079fc --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/InstantAsStringAttributeConvertersTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.Instant; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.InstantAsStringAttributeConverter; + +public class InstantAsStringAttributeConvertersTest { + + private static final InstantAsStringAttributeConverter CONVERTER = InstantAsStringAttributeConverter.create(); + + @Test + public void InstantAsStringAttributeConverterMinTest() { + verifyTransform(Instant.MIN, "-1000000000-01-01T00:00:00Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(Instant.EPOCH.minusMillis(1), "1969-12-31T23:59:59.999Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochTest() { + verifyTransform(Instant.EPOCH, "1970-01-01T00:00:00Z"); + } + + @Test + public void InstantAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(Instant.EPOCH.plusMillis(1), "1970-01-01T00:00:00.001Z"); + } + + @Test + public void InstantAsStringAttributeConverterMaxTest() { + verifyTransform(Instant.MAX, "+1000000000-12-31T23:59:59.999999999Z"); + } + + + @Test + public void InstantAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> 
transformTo(CONVERTER, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void InstantAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(CONVERTER, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(Instant objectToTransform, String attributeValueString) { + assertThat(transformFrom(CONVERTER, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(CONVERTER, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} 
diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java new file mode 100644 index 000000000000..ce4b4025464d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateAttributeConverterTest.java @@ -0,0 +1,93 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalDate; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateAttributeConverter; + +public class LocalDateAttributeConverterTest { + + private static LocalDateAttributeConverter converter = LocalDateAttributeConverter.create(); + + @Test + public void localDateAttributeConverterMinTest() { + verifyTransform(LocalDate.MIN, "-999999999-01-01"); + } + + @Test + public void localDateAttributeConverterNormalTest() { + verifyTransform(LocalDate.of(0, 1, 1), "0000-01-01"); + } + + @Test + public void localDateAttributeConverterMaxTest() { + verifyTransform(LocalDate.MAX, "+999999999-12-31"); + } + + + @Test + public void localDateAttributeConverterLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-9999999999-01-01") + .toAttributeValue())); + } + + @Test + 
public void localDateAttributeConverterHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-31") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-32") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void localDateAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(LocalDate objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, 
EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java new file mode 100644 index 000000000000..70a68b74c97f --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConverterTest.java @@ -0,0 +1,114 @@ +/* + * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalDateTime; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateTimeAttributeConverter; + +public class LocalDateTimeAttributeConverterTest { + + private static LocalDateTimeAttributeConverter converter = LocalDateTimeAttributeConverter.create(); + + @Test + public void localDateTimeAttributeConverterMinTest() { + verifyTransform(LocalDateTime.MIN, "-999999999-01-01T00:00"); + } + + @Test + public void localDateTimeAttributeConverterNormalTest() { + verifyTransform(LocalDateTime.of(0, 1, 1, 0, 0, 0, 0), "0000-01-01T00:00"); + } + + @Test + public void localDateTimeAttributeConverterMaxTest() { + verifyTransform(LocalDateTime.MAX, "+999999999-12-31T23:59:59.999999999"); + } + + + @Test + public void localDateTimeAttributeConverterLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-9999999999-01-01T00:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-31T00:00:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("9999999999-12-32T00:00:00") + .toAttributeValue())); + } + + @Test + public void 
localDateTimeAttributeConverterInvalidNanoSecondsTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("0000-01-01T00:00:00.9999999999") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void localDateTimeAttributeConverterAdditionallyAcceptLocalDateTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21").toAttributeValue())) + .isEqualTo(LocalDateTime.of(1988, 5, 21, 0, 0, 0)); + } + + private void verifyTransform(LocalDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConvertersTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConvertersTest.java deleted file mode 100644 index 32d4c1230d06..000000000000 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalDateTimeAttributeConvertersTest.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; - -import static org.assertj.core.api.Assertions.assertThat; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; -import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; - -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.MonthDay; -import org.junit.Test; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalDateTimeAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalTimeAttributeConverter; -import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MonthDayAttributeConverter; - -public class LocalDateTimeAttributeConvertersTest { - @Test - public void localDateAttributeConverterBehaves() { - LocalDateAttributeConverter converter = LocalDateAttributeConverter.create(); - - assertThat(transformFrom(converter, LocalDate.MIN).n()).isEqualTo("-9999999990101000000"); - assertThat(transformFrom(converter, LocalDate.of(0, 1, 1)).n()).isEqualTo("00101000000"); - assertThat(transformFrom(converter, LocalDate.MAX).n()).isEqualTo("9999999991231000000"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-99999999990101000000").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("99999999991231000000").toAttributeValue())); - - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-9999999990101000000").toAttributeValue())) - 
.isEqualTo(LocalDate.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("00101000000").toAttributeValue())).isEqualTo(LocalDate.of(0, 1, 1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("9999999991231000000").toAttributeValue())) - .isEqualTo(LocalDate.MAX); - } - - @Test - public void localDateTimeAttributeConverterBehaves() { - LocalDateTimeAttributeConverter converter = LocalDateTimeAttributeConverter.create(); - - assertThat(transformFrom(converter, LocalDateTime.MIN).n()).isEqualTo("-9999999990101000000"); - assertThat(transformFrom(converter, LocalDateTime.of(0, 1, 1, 0, 0, 0, 0)).n()).isEqualTo("00101000000"); - assertThat(transformFrom(converter, LocalDateTime.MAX).n()).isEqualTo("9999999991231235959.999999999"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-99999999990101000000").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("99999999991231000000").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("99999999991232000000").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("00101000000.9999999999").toAttributeValue())); - - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("-9999999990101000000").toAttributeValue())) - .isEqualTo(LocalDateTime.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("00101000000").toAttributeValue())) - .isEqualTo(LocalDateTime.of(0, 1, 1, 0, 0, 0, 0)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("9999999991231235959.999999999").toAttributeValue())) - .isEqualTo(LocalDateTime.MAX); - } - - @Test - public void localTimeAttributeConverterBehaves() { - LocalTimeAttributeConverter converter = LocalTimeAttributeConverter.create(); - - assertThat(transformFrom(converter, LocalTime.MIN).n()).isEqualTo("000000"); - 
assertThat(transformFrom(converter, LocalTime.of(1, 2, 3, 4)).n()).isEqualTo("010203.000000004"); - assertThat(transformFrom(converter, LocalTime.MAX).n()).isEqualTo("235959.999999999"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("-1").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("240000").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("000000.9999999999").toAttributeValue())); - - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("000000").toAttributeValue())) - .isEqualTo(LocalTime.MIN); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("010203.000000004").toAttributeValue())) - .isEqualTo(LocalTime.of(1, 2, 3, 4)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("235959.999999999").toAttributeValue())) - .isEqualTo(LocalTime.MAX); - } - - @Test - public void monthDayAttributeConverterBehaves() { - MonthDayAttributeConverter converter = MonthDayAttributeConverter.create(); - - assertThat(transformFrom(converter, MonthDay.of(1, 1)).n()).isEqualTo("0101"); - assertThat(transformFrom(converter, MonthDay.of(5, 21)).n()).isEqualTo("0521"); - assertThat(transformFrom(converter, MonthDay.of(12, 31)).n()).isEqualTo("1231"); - - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("X").toAttributeValue())); - assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromNumber("0230").toAttributeValue())); - - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0101").toAttributeValue())).isEqualTo(MonthDay.of(1, 1)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("0521").toAttributeValue())).isEqualTo(MonthDay.of(5, 21)); - assertThat(transformTo(converter, EnhancedAttributeValue.fromNumber("1231").toAttributeValue())).isEqualTo(MonthDay.of(12, 31)); - } -} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java new file mode 100644 index 000000000000..584b8614841d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/LocalTimeAttributeConverterTest.java @@ -0,0 +1,93 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.LocalTime; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.LocalTimeAttributeConverter; + +public class LocalTimeAttributeConverterTest { + + private static LocalTimeAttributeConverter converter = LocalTimeAttributeConverter.create(); + + @Test + public void localTimeAttributeConverterMinTest() { + verifyTransform(LocalTime.MIN, "00:00"); + } + + @Test + public void localTimeAttributeConverterNormalTest() { + verifyTransform(LocalTime.of(1, 2, 3, 4), "01:02:03.000000004"); + } + + @Test + public void localTimeAttributeConverterMaxTest() { + verifyTransform(LocalTime.MAX, "23:59:59.999999999"); + } + + + @Test + public void localTimeAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1") + .toAttributeValue())); + } + + @Test + public void 
localTimeAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("24:00:00") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterInvalidNanoSecondsTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:00:00.9999999999") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void localTimeAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + private void verifyTransform(LocalTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) 
+ .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java new file mode 100644 index 000000000000..7e972ae32a87 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/MonthDayAttributeConverterTest.java @@ -0,0 +1,88 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.MonthDay; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.MonthDayAttributeConverter; + +public class MonthDayAttributeConverterTest { + + private static MonthDayAttributeConverter converter = MonthDayAttributeConverter.create(); + + @Test + public void monthDayAttributeConverterMinTest() { + verifyTransform(MonthDay.of(1, 1), "--01-01"); + } + + @Test + public void monthDayAttributeConverterNormalTest() { + verifyTransform(MonthDay.of(5, 21), "--05-21"); + } + + @Test + public void monthDayAttributeConverterMaxTest() { + verifyTransform(MonthDay.of(12, 31), "--12-31"); + } + + + @Test + public void monthDayAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + 
public void monthDayAttributeConverterInvalidDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("--02-30") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptInstantTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001Z") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptOffsetTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void monthDayAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + private void verifyTransform(MonthDay objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} diff --git 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java new file mode 100644 index 000000000000..d7839a99d948 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/OffsetDateTimeAsStringAttributeConverterTest.java @@ -0,0 +1,112 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.OffsetDateTimeAsStringAttributeConverter; + +public class OffsetDateTimeAsStringAttributeConverterTest { + + private static OffsetDateTimeAsStringAttributeConverter converter = OffsetDateTimeAsStringAttributeConverter.create(); + + private static OffsetDateTime epochUtc = Instant.EPOCH.atOffset(ZoneOffset.UTC); + + @Test + public void OffsetDateTimeAsStringAttributeConverterMinTest() { + verifyTransform(OffsetDateTime.MIN, "-999999999-01-01T00:00+18:00"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(epochUtc.minusNanos(1), "1969-12-31T23:59:59.999999999Z"); + } + + @Test + public void 
OffsetDateTimeAsStringAttributeConverterEpochTest() { + verifyTransform(epochUtc, "1970-01-01T00:00Z"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(epochUtc.plusNanos(1), "1970-01-01T00:00:00.000000001Z"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterMaxTest() { + verifyTransform(OffsetDateTime.MAX, "+999999999-12-31T23:59:59.999999999-18:00"); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNormalOffsetTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.ofHours(1))); + } + + + @Test + public void OffsetDateTimeAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptTimeZoneNamedZonedTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00+01:00[Europe/Paris]") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> 
transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void OffsetDateTimeAsStringAttributeConverterAdditionallyAcceptInstantTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); + } + + private void verifyTransform(OffsetDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java new file mode 100644 index 000000000000..3f44c03e2824 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/converters/attribute/ZonedDateTimeAsStringAttributeConverterTest.java @@ -0,0 +1,132 @@ +package software.amazon.awssdk.enhanced.dynamodb.converters.attribute; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.assertFails; +import static 
software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformFrom; +import static software.amazon.awssdk.enhanced.dynamodb.converters.attribute.ConverterTestUtils.transformTo; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; +import org.junit.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.ZonedDateTimeAsStringAttributeConverter; + +public class ZonedDateTimeAsStringAttributeConverterTest { + + private static ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create(); + + private static ZonedDateTime epochUtc = Instant.EPOCH.atZone(ZoneOffset.UTC); + private static ZonedDateTime min = OffsetDateTime.MIN.toZonedDateTime(); + private static ZonedDateTime max = OffsetDateTime.MAX.toZonedDateTime(); + + @Test + public void ZonedDateTimeAsStringAttributeConverterMinTest() { + verifyTransform(min, "-999999999-01-01T00:00+18:00"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochMinusOneMilliTest() { + verifyTransform(epochUtc.minusNanos(1), "1969-12-31T23:59:59.999999999Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochTest() { + verifyTransform(epochUtc, "1970-01-01T00:00Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterEpochPlusOneMilliTest() { + verifyTransform(epochUtc.plusNanos(1), "1970-01-01T00:00:00.000000001Z"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterMaxTest() { + verifyTransform(max, "+999999999-12-31T23:59:59.999999999-18:00"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterTimeZoneAtParisTest() { + verifyTransform(Instant.EPOCH.atZone(ZoneId.of("Europe/Paris")), 
"1970-01-01T01:00+01:00[Europe/Paris]"); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNormalOffsetTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(ZonedDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.ofHours(1))); + } + + + @Test + public void ZonedDateTimeAsStringAttributeConverterExceedLowerBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("-1000000001-12-31T23:59:59.999999999Z") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterInvalidFormatTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("X") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterExceedHigherBoundTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("+1000000001-01-01T00:00:00Z") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterFakeZoneTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00[FakeZone]") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalDateTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21T00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalDateTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("1988-05-21") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptLocalTimeTest() { + assertFails(() -> transformTo(converter, EnhancedAttributeValue.fromString("00:12:00.000000001") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterNotAcceptMonthDayTest() { + assertFails(() -> transformTo(converter, 
EnhancedAttributeValue.fromString("05-21") + .toAttributeValue())); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterAdditionallyAcceptInstantTest() { + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00Z"))).isEqualTo(epochUtc); + } + + @Test + public void ZonedDateTimeAsStringAttributeConverterAdditionallyAcceptOffsetDateTimeTest() { + // To make sure the specific zone converter is selected, here a specific Zoned converter is used. + ZonedDateTimeAsStringAttributeConverter converter = ZonedDateTimeAsStringAttributeConverter.create(); + + assertThat(transformTo(converter, EnhancedAttributeValue.fromString("1970-01-01T00:00:00+01:00"))) + .isEqualTo(epochUtc.minus(1, ChronoUnit.HOURS)); + } + + private void verifyTransform(ZonedDateTime objectToTransform, String attributeValueString) { + assertThat(transformFrom(converter, objectToTransform)) + .isEqualTo(EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue()); + assertThat(transformTo(converter, EnhancedAttributeValue.fromString(attributeValueString).toAttributeValue())) + .isEqualTo(objectToTransform); + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java index 6a37e88164ba..66d0b0569613 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/ChainExtensionTest.java @@ -37,18 +37,19 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import 
software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.ChainExtension; import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @RunWith(MockitoJUnitRunner.class) public class ChainExtensionTest { private static final String TABLE_NAME = "concrete-table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final Map ATTRIBUTE_VALUES_1 = Collections.unmodifiableMap(Collections.singletonMap("key1", AttributeValue.builder().s("1").build())); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java index ea03d46a8b4b..b12ae9a8a18c 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java @@ -25,17 +25,18 @@ import java.util.Map; import org.junit.Test; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import 
software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; public class VersionedRecordExtensionTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private final VersionedRecordExtension versionedRecordExtension = VersionedRecordExtension.builder().build(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java index 29c808ba0a37..205d9ec86fe2 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicCrudTest.java @@ -45,6 +45,8 @@ import software.amazon.awssdk.services.dynamodb.model.ProjectionType; public class BasicCrudTest extends LocalDynamoDbSyncTestBase { + private static final String ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS = "a*t:t.r-i#bute3"; + private static class Record { private String id; private String sort; @@ -181,7 +183,7 @@ public int hashCode() { .getter(Record::getAttribute2) .setter(Record::setAttribute2) .tags(secondaryPartitionKey("gsi_1"))) - .addAttribute(String.class, a -> 
a.name("attribute3") + .addAttribute(String.class, a -> a.name(ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .getter(Record::getAttribute3) .setter(Record::setAttribute3) .tags(secondarySortKey("gsi_1"))) @@ -350,7 +352,7 @@ public void putWithConditionThatSucceeds() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("three")) .build(); @@ -378,7 +380,7 @@ public void putWithConditionThatFails() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("wrong")) .build(); @@ -409,7 +411,7 @@ public void deleteWithConditionThatSucceeds() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("three")) .build(); @@ -435,7 +437,7 @@ public void deleteWithConditionThatFails() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("wrong")) .build(); @@ -598,7 +600,7 @@ public void 
updateWithConditionThatSucceeds() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("three")) .build(); @@ -627,7 +629,7 @@ public void updateWithConditionThatFails() { Expression conditionExpression = Expression.builder() .expression("#key = :value OR #key1 = :value1") .putExpressionName("#key", "attribute") - .putExpressionName("#key1", "attribute3") + .putExpressionName("#key1", ATTRIBUTE_NAME_WITH_SPECIAL_CHARACTERS) .putExpressionValue(":value", stringValue("wrong")) .putExpressionValue(":value1", stringValue("wrong")) .build(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java index 96f87123a89c..b60efcdc870f 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicQueryTest.java @@ -17,6 +17,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; @@ -157,6 +158,28 @@ public void queryAllRecordsDefaultSettings_shortcutForm() { assertThat(page.lastEvaluatedKey(), is(nullValue())); } + @Test + public void queryAllRecordsDefaultSettings_withProjection() { + insertRecords(); + + Iterator> results 
= + mappedTable.query(b -> b + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .attributesToProject("value") + ).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items().size(), is(RECORDS.size())); + + Record firstRecord = page.items().get(0); + assertThat(firstRecord.id, is(nullValue())); + assertThat(firstRecord.sort, is(nullValue())); + assertThat(firstRecord.value, is(0)); + } + @Test public void queryAllRecordsDefaultSettings_shortcutForm_viaItems() { insertRecords(); @@ -195,6 +218,39 @@ public void queryAllRecordsWithFilter() { assertThat(page.lastEvaluatedKey(), is(nullValue())); } + @Test + public void queryAllRecordsWithFilterAndProjection() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#value >= :min_value AND #value <= :max_value") + .expressionValues(expressionValues) + .expressionNames(Collections.singletonMap("#value", "value")) + .build(); + + Iterator> results = + mappedTable.query(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue("id-value"))) + .filterExpression(expression) + .attributesToProject("value") + .build()) + .iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), hasSize(3)); + assertThat(page.lastEvaluatedKey(), is(nullValue())); + + Record record = page.items().get(0); + assertThat(record.id, nullValue()); + assertThat(record.sort, nullValue()); + assertThat(record.value, is(3)); + } + @Test public void queryBetween() { insertRecords(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java index e4a88653aea1..caa6e855ed18 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/BasicScanTest.java @@ -17,6 +17,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; @@ -140,6 +141,24 @@ public void scanAllRecordsDefaultSettings() { assertThat(page.lastEvaluatedKey(), is(nullValue())); } + @Test + public void queryAllRecordsDefaultSettings_withProjection() { + insertRecords(); + + Iterator> results = + mappedTable.scan(b -> b.attributesToProject("sort")).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items().size(), is(RECORDS.size())); + + Record firstRecord = page.items().get(0); + assertThat(firstRecord.id, is(nullValue())); + assertThat(firstRecord.sort, is(0)); + } + @Test public void scanAllRecordsDefaultSettings_viaItems() { insertRecords(); @@ -171,6 +190,38 @@ public void scanAllRecordsWithFilter() { assertThat(page.lastEvaluatedKey(), is(nullValue())); } + @Test + public void scanAllRecordsWithFilterAndProjection() { + insertRecords(); + Map expressionValues = new HashMap<>(); + expressionValues.put(":min_value", numberValue(3)); + expressionValues.put(":max_value", numberValue(5)); + Expression expression = Expression.builder() + .expression("#sort >= :min_value AND #sort <= :max_value") + .expressionValues(expressionValues) + .putExpressionName("#sort", "sort") + 
.build(); + + Iterator> results = + mappedTable.scan( + ScanEnhancedRequest.builder() + .attributesToProject("sort") + .filterExpression(expression) + .build() + ).iterator(); + + assertThat(results.hasNext(), is(true)); + Page page = results.next(); + assertThat(results.hasNext(), is(false)); + + assertThat(page.items(), hasSize(3)); + + Record record = page.items().get(0); + + assertThat(record.id, is(nullValue())); + assertThat(record.sort, is(3)); + } + @Test public void scanLimit() { insertRecords(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java new file mode 100644 index 000000000000..2d8ac8e2088d --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyBinaryTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class EmptyBinaryTest { + private static final String TABLE_NAME = "TEST_TABLE"; + private static final SdkBytes EMPTY_BYTES = SdkBytes.fromUtf8String(""); + private static final AttributeValue EMPTY_BINARY = AttributeValue.builder().b(EMPTY_BYTES).build(); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbTable dynamoDbTable; + + @DynamoDbBean + public 
static class TestBean { + private String id; + private SdkBytes b; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public SdkBytes getB() { + return b; + } + + public void setB(SdkBytes b) { + this.b = b; + } + } + + private static final TableSchema TABLE_SCHEMA = TableSchema.fromBean(TestBean.class); + + @Before + public void initializeTable() { + DynamoDbEnhancedClient dynamoDbEnhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .build(); + + this.dynamoDbTable = dynamoDbEnhancedClient.table(TABLE_NAME, TABLE_SCHEMA); + } + + @Test + public void putEmptyBytes() { + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setB(EMPTY_BYTES); + + dynamoDbTable.putItem(testBean); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("b", EMPTY_BINARY); + + PutItemRequest expectedRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .build(); + + verify(mockDynamoDbClient).putItem(expectedRequest); + } + + @Test + public void getEmptyBytes() { + Map itemMap = new HashMap<>(); + itemMap.put("id", AttributeValue.builder().s("id123").build()); + itemMap.put("b", EMPTY_BINARY); + + GetItemResponse response = GetItemResponse.builder() + .item(itemMap) + .build(); + + when(mockDynamoDbClient.getItem(any(GetItemRequest.class))).thenReturn(response); + + TestBean result = dynamoDbTable.getItem(r -> r.key(k -> k.partitionValue("id123"))); + + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getB()).isEqualTo(EMPTY_BYTES); + } + + @Test + public void updateEmptyBytesWithCondition() { + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("b", EMPTY_BINARY); + TestBean testBean = new TestBean(); + testBean.setId("id123"); + 
testBean.setB(EMPTY_BYTES); + + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(expectedItemMap) + .build(); + when(mockDynamoDbClient.updateItem(any(UpdateItemRequest.class))).thenReturn(response); + + Expression conditionExpression = Expression.builder() + .expression("#attr = :val") + .expressionNames(singletonMap("#attr", "b")) + .expressionValues(singletonMap(":val", EMPTY_BINARY)) + .build(); + + TestBean result = dynamoDbTable.updateItem(r -> r.item(testBean).conditionExpression(conditionExpression)); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_b", "b"); + expectedExpressionAttributeNames.put("#attr", "b"); + Map expectedExpressionAttributeValues = new HashMap<>(); + expectedExpressionAttributeValues.put(":AMZN_MAPPED_b", EMPTY_BINARY); + expectedExpressionAttributeValues.put(":val", EMPTY_BINARY); + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s("id123").build()); + + UpdateItemRequest expectedRequest = + UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_NEW) + .updateExpression("SET #AMZN_MAPPED_b = :AMZN_MAPPED_b") + .conditionExpression("#attr = :val") + .expressionAttributeNames(expectedExpressionAttributeNames) + .expressionAttributeValues(expectedExpressionAttributeValues) + .build(); + + verify(mockDynamoDbClient).updateItem(expectedRequest); + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getB()).isEqualTo(EMPTY_BYTES); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java new file mode 100644 index 000000000000..c6190ffec48f --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/EmptyStringTest.java @@ -0,0 +1,174 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.functionaltests; + +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; +import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.DynamoDbClient; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import 
software.amazon.awssdk.services.dynamodb.model.PutItemRequest; +import software.amazon.awssdk.services.dynamodb.model.ReturnValue; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; +import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; + +@RunWith(MockitoJUnitRunner.class) +public class EmptyStringTest { + private static final String TABLE_NAME = "TEST_TABLE"; + private static final AttributeValue EMPTY_STRING = AttributeValue.builder().s("").build(); + + @Mock + private DynamoDbClient mockDynamoDbClient; + + private DynamoDbTable dynamoDbTable; + + @DynamoDbBean + public static class TestBean { + private String id; + private String s; + + @DynamoDbPartitionKey + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getS() { + return s; + } + + public void setS(String s) { + this.s = s; + } + } + + private static final TableSchema TABLE_SCHEMA = TableSchema.fromBean(TestBean.class); + + @Before + public void initializeTable() { + DynamoDbEnhancedClient dynamoDbEnhancedClient = DynamoDbEnhancedClient.builder() + .dynamoDbClient(mockDynamoDbClient) + .build(); + + this.dynamoDbTable = dynamoDbEnhancedClient.table(TABLE_NAME, TABLE_SCHEMA); + } + + @Test + public void putEmptyString() { + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setS(""); + + dynamoDbTable.putItem(testBean); + + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("s", EMPTY_STRING); + + PutItemRequest expectedRequest = PutItemRequest.builder() + .tableName(TABLE_NAME) + .item(expectedItemMap) + .build(); + + verify(mockDynamoDbClient).putItem(expectedRequest); + } + + @Test + public void getEmptyString() { + Map itemMap = new HashMap<>(); + itemMap.put("id", AttributeValue.builder().s("id123").build()); + itemMap.put("s", EMPTY_STRING); + + GetItemResponse response = 
GetItemResponse.builder() + .item(itemMap) + .build(); + + when(mockDynamoDbClient.getItem(any(GetItemRequest.class))).thenReturn(response); + + TestBean result = dynamoDbTable.getItem(r -> r.key(k -> k.partitionValue("id123"))); + + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getS()).isEmpty(); + } + + @Test + public void updateEmptyStringWithCondition() { + Map expectedItemMap = new HashMap<>(); + expectedItemMap.put("id", AttributeValue.builder().s("id123").build()); + expectedItemMap.put("s", EMPTY_STRING); + TestBean testBean = new TestBean(); + testBean.setId("id123"); + testBean.setS(""); + + UpdateItemResponse response = UpdateItemResponse.builder() + .attributes(expectedItemMap) + .build(); + when(mockDynamoDbClient.updateItem(any(UpdateItemRequest.class))).thenReturn(response); + + Expression conditionExpression = Expression.builder() + .expression("#attr = :val") + .expressionNames(singletonMap("#attr", "s")) + .expressionValues(singletonMap(":val", EMPTY_STRING)) + .build(); + + TestBean result = dynamoDbTable.updateItem(r -> r.item(testBean).conditionExpression(conditionExpression)); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_s", "s"); + expectedExpressionAttributeNames.put("#attr", "s"); + Map expectedExpressionAttributeValues = new HashMap<>(); + expectedExpressionAttributeValues.put(":AMZN_MAPPED_s", EMPTY_STRING); + expectedExpressionAttributeValues.put(":val", EMPTY_STRING); + Map expectedKeyMap = new HashMap<>(); + expectedKeyMap.put("id", AttributeValue.builder().s("id123").build()); + + UpdateItemRequest expectedRequest = + UpdateItemRequest.builder() + .tableName(TABLE_NAME) + .key(expectedKeyMap) + .returnValues(ReturnValue.ALL_NEW) + .updateExpression("SET #AMZN_MAPPED_s = :AMZN_MAPPED_s") + .conditionExpression("#attr = :val") + .expressionAttributeNames(expectedExpressionAttributeNames) + 
.expressionAttributeValues(expectedExpressionAttributeValues) + .build(); + + verify(mockDynamoDbClient).updateItem(expectedRequest); + assertThat(result.getId()).isEqualTo("id123"); + assertThat(result.getS()).isEmpty(); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java index 2191e04f6782..547927b09105 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/DefaultDocumentTest.java @@ -20,7 +20,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -37,7 +36,7 @@ import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.internal.extensions.DefaultDynamoDbExtensionContext; -import software.amazon.awssdk.enhanced.dynamodb.internal.operations.OperationContext; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DefaultOperationContext; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -83,7 +82,7 @@ public void extension_mapsToItem() { assertThat(defaultDocument.getItem(mappedTable), is(fakeItem2)); verify(mockDynamoDbEnhancedClientExtension).afterRead(DefaultDynamoDbExtensionContext.builder() .tableMetadata(FakeItem.getTableMetadata()) - .operationContext(OperationContext.create(mappedTable.tableName())) + 
.operationContext(DefaultOperationContext.create(mappedTable.tableName())) .items(fakeItemMap).build() ); } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java index 9ebe94cb28ec..adb51a92cdab 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/EnhancedClientUtilsTest.java @@ -57,4 +57,11 @@ public void createKeyFromMap_partitionAndSort() { assertThat(key.partitionKeyValue()).isEqualTo(PARTITION_VALUE); assertThat(key.sortKeyValue()).isEqualTo(Optional.of(SORT_VALUE)); } + + @Test + public void cleanAttributeName_cleansSpecialCharacters() { + String result = EnhancedClientUtils.cleanAttributeName("a*b.c-d:e#f"); + + assertThat(result).isEqualTo("a_b_c_d_e_f"); + } } \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java new file mode 100644 index 000000000000..069ae50a2d4b --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ChainConverterProviderTest.java @@ -0,0 +1,75 @@ +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import 
org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; + +@RunWith(MockitoJUnitRunner.class) +public class ChainConverterProviderTest { + + @Mock + private AttributeConverterProvider mockConverterProvider1; + + @Mock + private AttributeConverterProvider mockConverterProvider2; + + @Mock + private AttributeConverter mockAttributeConverter1; + + @Mock + private AttributeConverter mockAttributeConverter2; + + @Test + public void checkSingleProviderChain() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1); + List providerQueue = chain.chainedProviders(); + assertThat(providerQueue.size()).isEqualTo(1); + assertThat(providerQueue.get(0)).isEqualTo(mockConverterProvider1); + } + + @Test + public void checkMultipleProviderChain() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + List providerQueue = chain.chainedProviders(); + assertThat(providerQueue.size()).isEqualTo(2); + assertThat(providerQueue.get(0)).isEqualTo(mockConverterProvider1); + assertThat(providerQueue.get(1)).isEqualTo(mockConverterProvider2); + } + + @Test + public void resolveSingleProviderChain() { + when(mockConverterProvider1.converterFor(any())).thenReturn(mockAttributeConverter1); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter1); + } + + @Test + public void resolveMultipleProviderChain_noMatch() { + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isNull(); + } + + @Test + public void resolveMultipleProviderChain_matchSecond() { + 
when(mockConverterProvider2.converterFor(any())).thenReturn(mockAttributeConverter2); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter2); + } + + @Test + public void resolveMultipleProviderChain_matchFirst() { + when(mockConverterProvider1.converterFor(any())).thenReturn(mockAttributeConverter1); + ChainConverterProvider chain = ChainConverterProvider.create(mockConverterProvider1, mockConverterProvider2); + assertThat(chain.converterFor(EnhancedType.of(String.class))).isSameAs(mockAttributeConverter1); + } + +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java new file mode 100644 index 000000000000..0cd3294c085a --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/ConverterProviderResolverTest.java @@ -0,0 +1,55 @@ +package software.amazon.awssdk.enhanced.dynamodb.internal.converter; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.DefaultAttributeConverterProvider; + +@RunWith(MockitoJUnitRunner.class) +public class ConverterProviderResolverTest { + + @Mock + private AttributeConverterProvider mockConverterProvider1; + + @Mock + private AttributeConverterProvider 
mockConverterProvider2; + + @Test + public void resolveProviders_null() { + assertThat(ConverterProviderResolver.resolveProviders(null)).isNull(); + } + + @Test + public void resolveProviders_empty() { + assertThat(ConverterProviderResolver.resolveProviders(emptyList())).isNull(); + } + + @Test + public void resolveProviders_singleton() { + assertThat(ConverterProviderResolver.resolveProviders(singletonList(mockConverterProvider1))) + .isSameAs(mockConverterProvider1); + } + + @Test + public void resolveProviders_multiple() { + AttributeConverterProvider result = ConverterProviderResolver.resolveProviders( + Arrays.asList(mockConverterProvider1, mockConverterProvider2)); + assertThat(result).isNotNull(); + assertThat(result).isInstanceOf(ChainConverterProvider.class); + } + + @Test + public void defaultProvider_returnsInstance() { + AttributeConverterProvider defaultProvider = ConverterProviderResolver.defaultConverterProvider(); + assertThat(defaultProvider).isNotNull(); + assertThat(defaultProvider).isInstanceOf(DefaultAttributeConverterProvider.class); + } + +} \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java new file mode 100644 index 000000000000..8cb798e71b7e --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/converter/attribute/OptionalAttributeValueConverterTest.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute; + +import org.junit.Test; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +public class OptionalAttributeValueConverterTest { + private static final OptionalAttributeConverter CONVERTER = + OptionalAttributeConverter.create(StringAttributeConverter.create()); + @Test + public void testTransformTo_nulPropertyIsNull_doesNotThrowNPE() { + AttributeValue av = AttributeValue.builder() + .nul(null) + .s("foo") + .build(); + + CONVERTER.transformTo(av); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java index c1f2664f7919..0ccadc5c98ad 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CommonOperationTest.java @@ -28,6 +28,7 @@ import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; @@ -64,7 +65,7 @@ public void stubSpy() { @Test public void 
execute_defaultImplementation_behavesCorrectlyAndReturnsCorrectResult() { - OperationContext operationContext = OperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME); + OperationContext operationContext = DefaultOperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME); String result = spyCommonOperation.execute(FakeItem.getTableSchema(), operationContext, mockDynamoDbEnhancedClientExtension, diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java index c8eb0a48dc04..1c56f20d19fd 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ConditionCheckTest.java @@ -28,6 +28,7 @@ import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.model.ConditionCheck; @@ -53,7 +54,7 @@ public void generateTransactWriteItem() { .key(k -> k.partitionValue(fakeItem.getId())) .conditionExpression(conditionExpression) .build(); - OperationContext context = OperationContext.create("table-name", TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create("table-name", TableMetadata.primaryIndexName()); TransactWriteItem result = operation.generateTransactWriteItem(FakeItem.getTableSchema(), context, mockDynamoDbEnhancedClientExtension); diff 
--git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java index 9f744244d186..08cd1f478356 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/CreateTableOperationTest.java @@ -37,6 +37,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithBinaryKey; @@ -60,9 +61,9 @@ public class CreateTableOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private static MatchedGsi matchesGsi(software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex other) { return new MatchedGsi(other); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContextTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java 
similarity index 84% rename from services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContextTest.java rename to services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java index e2dadb6199fe..8c37782e588e 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/OperationContextTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DefaultOperationContextTest.java @@ -22,10 +22,10 @@ import org.junit.Test; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; -public class OperationContextTest { +public class DefaultOperationContextTest { @Test public void createWithTableNameAndIndexName() { - OperationContext context = OperationContext.create("table_name", "index_name"); + DefaultOperationContext context = DefaultOperationContext.create("table_name", "index_name"); assertThat(context.tableName(), is("table_name")); assertThat(context.indexName(), is("index_name")); @@ -33,7 +33,7 @@ public void createWithTableNameAndIndexName() { @Test public void createWithTableName() { - OperationContext context = OperationContext.create("table_name"); + DefaultOperationContext context = DefaultOperationContext.create("table_name"); assertThat(context.tableName(), is("table_name")); assertThat(context.indexName(), Matchers.is(TableMetadata.primaryIndexName())); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java index e108eb7acf0e..4c990587bce8 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/DeleteItemOperationTest.java @@ -15,13 +15,13 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.operations; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -42,6 +42,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -61,10 +62,11 @@ public class DeleteItemOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private static final Expression CONDITION_EXPRESSION; + private static final Expression 
MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); static { Map expressionNames = new HashMap<>(); @@ -162,6 +164,24 @@ public void generateRequest_withConditionExpression() { assertThat(request.expressionAttributeValues(), is(CONDITION_EXPRESSION.expressionValues())); } + @Test + public void generateRequest_withMinimalConditionExpression() { + FakeItem keyItem = createUniqueFakeItem(); + DeleteItemOperation deleteItemOperation = + DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() + .key(k -> k.partitionValue(keyItem.getId())) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + DeleteItemRequest request = deleteItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(emptyMap())); + assertThat(request.expressionAttributeValues(), is(emptyMap())); + } + @Test(expected = IllegalArgumentException.class) public void generateRequest_noPartitionKey_throwsIllegalArgumentException() { DeleteItemOperation deleteItemOperation = @@ -270,7 +290,7 @@ public void generateTransactWriteItem_basicRequest() { spy(DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() .key(k -> k.partitionValue(fakeItem.getId())) .build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder() .tableName(TABLE_NAME) @@ -300,7 +320,7 @@ public void generateTransactWriteItem_conditionalRequest() { spy(DeleteItemOperation.create(DeleteItemEnhancedRequest.builder() .key(k -> k.partitionValue(fakeItem.getId())) .build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context 
= DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); String conditionExpression = "condition-expression"; Map attributeValues = Collections.singletonMap("key", stringValue("value1")); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java index ac386af59d34..e20c1f37966e 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/GetItemOperationTest.java @@ -35,6 +35,7 @@ import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -51,9 +52,9 @@ public class GetItemOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); @Mock private DynamoDbClient mockDynamoDbClient; diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java index 21c32a1c3732..bb28acc54809 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/IndexOperationTest.java @@ -26,6 +26,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; @@ -57,7 +58,7 @@ public void executeOnSecondaryIndex_defaultImplementation_callsExecuteCorrectly( assertThat(fakeIndexOperation.lastDynamoDbClient, sameInstance(mockDynamoDbClient)); assertThat(fakeIndexOperation.lastDynamoDbEnhancedClientExtension, sameInstance(mockDynamoDbEnhancedClientExtension)); assertThat(fakeIndexOperation.lastTableSchema, sameInstance(FakeItem.getTableSchema())); - assertThat(fakeIndexOperation.lastOperationContext, is(OperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME))); + assertThat(fakeIndexOperation.lastOperationContext, is(DefaultOperationContext.create(FAKE_TABLE_NAME, FAKE_INDEX_NAME))); } private static class FakeIndexOperation implements IndexOperation { diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java index eaec39926a08..11f772c2e0f2 100644 --- 
a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/PutItemOperationTest.java @@ -15,12 +15,12 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.operations; +import static java.util.Collections.emptyMap; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -39,6 +39,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -56,11 +57,12 @@ public class PutItemOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private static final Expression CONDITION_EXPRESSION; private static final Expression CONDITION_EXPRESSION_2; + private static final Expression 
MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); static { Map expressionNames = new HashMap<>(); @@ -166,6 +168,24 @@ public void generateRequest_withConditionExpression_generatesCorrectRequest() { assertThat(request, is(expectedRequest)); } + @Test + public void generateRequest_withMinimalConditionExpression() { + FakeItem fakeItem = createUniqueFakeItem(); + PutItemOperation putItemOperation = + PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) + .item(fakeItem) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + PutItemRequest request = putItemOperation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(emptyMap())); + assertThat(request.expressionAttributeValues(), is(emptyMap())); + } + @Test public void generateRequest_withConditionExpression_andExtensionWithSingleCondition() { FakeItem baseFakeItem = createUniqueFakeItem(); @@ -274,7 +294,7 @@ public void generateTransactWriteItem_basicRequest() { PutItemOperation putItemOperation = spy(PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) .item(fakeItem) .build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); PutItemRequest putItemRequest = PutItemRequest.builder() .tableName(TABLE_NAME) @@ -303,7 +323,7 @@ public void generateTransactWriteItem_conditionalRequest() { PutItemOperation putItemOperation = spy(PutItemOperation.create(PutItemEnhancedRequest.builder(FakeItem.class) .item(fakeItem) .build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, 
TableMetadata.primaryIndexName()); String conditionExpression = "condition-expression"; Map attributeValues = Collections.singletonMap("key", stringValue("value1")); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java index 020e41c83a84..963de03e6cf6 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/QueryOperationTest.java @@ -49,6 +49,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -70,9 +71,9 @@ public class QueryOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private final FakeItem keyItem = createUniqueFakeItem(); private final QueryOperation queryOperation = @@ -272,6 +273,23 @@ public void generateRequest_consistentRead() { assertThat(queryRequest.consistentRead(), is(true)); } + @Test + 
public void generateRequest_projectionExpression() { + QueryOperation queryToTest = + QueryOperation.create(QueryEnhancedRequest.builder() + .queryConditional(keyEqualTo(k -> k.partitionValue(keyItem.getId()))) + .attributesToProject("id") + .addAttributeToProject("version") + .build()); + QueryRequest queryRequest = queryToTest.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + assertThat(queryRequest.projectionExpression(), is("#AMZN_MAPPED_id,#AMZN_MAPPED_version")); + assertThat(queryRequest.expressionAttributeNames().get("#AMZN_MAPPED_id"), is ("id")); + assertThat(queryRequest.expressionAttributeNames().get("#AMZN_MAPPED_version"), is ("version")); + } + @Test public void generateRequest_hashKeyOnly_withExclusiveStartKey() { FakeItem exclusiveStartKey = createUniqueFakeItem(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java index 176ce174adba..8228be4add3d 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/ScanOperationTest.java @@ -30,6 +30,7 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -45,6 +46,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import 
software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -65,9 +67,9 @@ public class ScanOperationTest { private static final String TABLE_NAME = "table-name"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private final ScanOperation scanOperation = ScanOperation.create(ScanEnhancedRequest.builder().build()); @@ -188,6 +190,30 @@ public void generateRequest_consistentRead() { assertThat(request, is(expectedRequest)); } + @Test + public void generateRequest_projectionExpression() { + ScanOperation operation = ScanOperation.create( + ScanEnhancedRequest.builder() + .attributesToProject("id") + .addAttributeToProject("version") + .build() + ); + ScanRequest request = operation.generateRequest(FakeItem.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedExpressionAttributeNames = new HashMap<>(); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_id", "id"); + expectedExpressionAttributeNames.put("#AMZN_MAPPED_version", "version"); + + ScanRequest expectedRequest = ScanRequest.builder() + .tableName(TABLE_NAME) + .projectionExpression("#AMZN_MAPPED_id,#AMZN_MAPPED_version") + .expressionAttributeNames(expectedExpressionAttributeNames) + .build(); + assertThat(request, is(expectedRequest)); + } + @Test public void generateRequest_hashKeyOnly_exclusiveStartKey() { FakeItem exclusiveStartKey = createUniqueFakeItem(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java index e1dc40ac5170..81b2e725ae10 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/TableOperationTest.java @@ -26,6 +26,7 @@ import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; @@ -57,7 +58,7 @@ public void executeOnPrimaryIndex_defaultImplementation_callsExecuteCorrectly() assertThat(fakeTableOperation.lastDynamoDbEnhancedClientExtension, sameInstance(mockDynamoDbEnhancedClientExtension)); assertThat(fakeTableOperation.lastTableSchema, sameInstance(FakeItem.getTableSchema())); assertThat(fakeTableOperation.lastOperationContext, is( - OperationContext.create(FAKE_TABLE_NAME, TableMetadata.primaryIndexName()))); + DefaultOperationContext.create(FAKE_TABLE_NAME, TableMetadata.primaryIndexName()))); } private static class FakeTableOperation implements TableOperation { diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java index 4f3738576264..4698dad3ad2f 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/operations/UpdateItemOperationTest.java @@ -26,7 +26,6 @@ import static org.hamcrest.Matchers.sameInstance; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -46,6 +45,7 @@ import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbExtensionContext; import software.amazon.awssdk.enhanced.dynamodb.Expression; +import software.amazon.awssdk.enhanced.dynamodb.OperationContext; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.extensions.ReadModification; import software.amazon.awssdk.enhanced.dynamodb.extensions.WriteModification; @@ -72,11 +72,12 @@ public class UpdateItemOperationTest { private static final String SUBCLASS_ATTRIBUTE_VALUE = ":AMZN_MAPPED_subclass_attribute"; private static final OperationContext PRIMARY_CONTEXT = - OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); private static final OperationContext GSI_1_CONTEXT = - OperationContext.create(TABLE_NAME, "gsi_1"); + DefaultOperationContext.create(TABLE_NAME, "gsi_1"); private static final Expression CONDITION_EXPRESSION; - private static final Expression CONDITION_EXPRESSION_2; + private static final Expression MINIMAL_CONDITION_EXPRESSION = Expression.builder().expression("foo = bar").build(); + static { Map expressionNames = new HashMap<>(); @@ -92,20 +93,6 @@ public class UpdateItemOperationTest { .build(); } - static { - Map expressionNames = new HashMap<>(); - expressionNames.put("#test_field_3", "test_field_3"); - 
expressionNames.put("#test_field_4", "test_field_4"); - Map expressionValues = new HashMap<>(); - expressionValues.put(":test_value_3", numberValue(3)); - expressionValues.put(":test_value_4", numberValue(4)); - CONDITION_EXPRESSION_2 = Expression.builder() - .expression("#test_field_3 = :test_value_3 OR #test_field_4 = :test_value_4") - .expressionNames(Collections.unmodifiableMap(expressionNames)) - .expressionValues(Collections.unmodifiableMap(expressionValues)) - .build(); - } - @Mock private DynamoDbClient mockDynamoDbClient; @@ -205,6 +192,31 @@ public void generateRequest_withConditionExpression() { assertThat(request, is(expectedRequest)); } + @Test + public void generateRequest_withMinimalConditionExpression() { + FakeItemWithSort item = createUniqueFakeItemWithSort(); + item.setOtherAttribute1("value-1"); + + UpdateItemOperation updateItemOperation = + UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItemWithSort.class) + .item(item) + .conditionExpression(MINIMAL_CONDITION_EXPRESSION) + .build()); + + UpdateItemRequest request = updateItemOperation.generateRequest(FakeItemWithSort.getTableSchema(), + PRIMARY_CONTEXT, + null); + + Map expectedValues = new HashMap<>(); + expectedValues.put(OTHER_ATTRIBUTE_1_VALUE, AttributeValue.builder().s("value-1").build()); + Map expectedNames = new HashMap<>(); + expectedNames.put(OTHER_ATTRIBUTE_1_NAME, "other_attribute_1"); + expectedNames.put(OTHER_ATTRIBUTE_2_NAME, "other_attribute_2"); + assertThat(request.conditionExpression(), is(MINIMAL_CONDITION_EXPRESSION.expression())); + assertThat(request.expressionAttributeNames(), is(expectedNames)); + assertThat(request.expressionAttributeValues(), is(expectedValues)); + } + @Test public void generateRequest_explicitlyUnsetIgnoreNulls() { FakeItemWithSort item = createUniqueFakeItemWithSort(); @@ -693,7 +705,7 @@ public void generateTransactWriteItem_basicRequest() { Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); 
UpdateItemOperation updateItemOperation = spy(UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); String updateExpression = "update-expression"; Map attributeValues = Collections.singletonMap("key", stringValue("value1")); Map attributeNames = Collections.singletonMap("key", "value2"); @@ -730,7 +742,7 @@ public void generateTransactWriteItem_conditionalRequest() { Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); UpdateItemOperation updateItemOperation = spy(UpdateItemOperation.create(UpdateItemEnhancedRequest.builder(FakeItem.class).item(fakeItem).build())); - OperationContext context = OperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); + OperationContext context = DefaultOperationContext.create(TABLE_NAME, TableMetadata.primaryIndexName()); String updateExpression = "update-expression"; String conditionExpression = "condition-expression"; Map attributeValues = Collections.singletonMap("key", stringValue("value1")); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java index 6d361f72e4ba..0dd88c8eabea 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/BeanTableSchemaTest.java @@ -44,9 +44,9 @@ import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AttributeConverterBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.AttributeConverterNoConstructorBean; import 
software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.CommonTypesBean; -import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ConverterBean; -import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ConverterNoConstructorBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.DocumentBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EmptyConverterProvidersInvalidBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EmptyConverterProvidersValidBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.EnumBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ExtendedBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.FlattenedBean; @@ -54,6 +54,8 @@ import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.InvalidBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ListBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.MapBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.MultipleConverterProvidersBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.NoConstructorConverterProvidersBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ParameterizedAbstractBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.ParameterizedDocumentBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.PrimitiveTypesBean; @@ -62,6 +64,7 @@ import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SetBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SetterAnnotatedBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SimpleBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SingleConverterProvidersBean; import software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans.SortKeyBean; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; @@ -866,29 +869,10 
@@ public void itemType_returnsCorrectClass() { } @Test - public void usesCustomAttributeConverterProvider() { - BeanTableSchema beanTableSchema = BeanTableSchema.create(ConverterBean.class); - - ConverterBean converterBean = new ConverterBean(); - converterBean.setId("id-value"); - converterBean.setIntegerAttribute(123); - - Map itemMap = beanTableSchema.itemToMap(converterBean, false); - - assertThat(itemMap.size(), is(2)); - assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); - assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); - - ConverterBean reverse = beanTableSchema.mapToItem(itemMap); - assertThat(reverse.getId(), is(equalTo("id-value-custom"))); - assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); - } - - @Test - public void converterProviderWithoutConstructor_throwsIllegalArgumentException() { + public void attributeConverterWithoutConstructor_throwsIllegalArgumentException() { exception.expect(IllegalArgumentException.class); exception.expectMessage("default constructor"); - BeanTableSchema.create(ConverterNoConstructorBean.class); + BeanTableSchema.create(AttributeConverterNoConstructorBean.class); } @Test @@ -915,9 +899,74 @@ public void usesCustomAttributeConverter() { } @Test - public void attributeConverterWithoutConstructor_throwsIllegalArgumentException() { + public void converterProviderWithoutConstructor_throwsIllegalArgumentException() { exception.expect(IllegalArgumentException.class); exception.expectMessage("default constructor"); - BeanTableSchema.create(AttributeConverterNoConstructorBean.class); + BeanTableSchema.create(NoConstructorConverterProvidersBean.class); + } + + @Test + public void usesCustomAttributeConverterProvider() { + BeanTableSchema beanTableSchema = BeanTableSchema.create(SingleConverterProvidersBean.class); + + SingleConverterProvidersBean converterBean = new SingleConverterProvidersBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map 
itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + SingleConverterProvidersBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); + } + + @Test + public void usesCustomAttributeConverterProviders() { + BeanTableSchema beanTableSchema = + BeanTableSchema.create(MultipleConverterProvidersBean.class); + + MultipleConverterProvidersBean converterBean = new MultipleConverterProvidersBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + MultipleConverterProvidersBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); + } + + @Test + public void emptyConverterProviderList_fails_whenAttributeConvertersAreMissing() { + exception.expect(NullPointerException.class); + BeanTableSchema.create(EmptyConverterProvidersInvalidBean.class); + } + + @Test + public void emptyConverterProviderList_correct_whenAttributeConvertersAreSupplied() { + BeanTableSchema beanTableSchema = + BeanTableSchema.create(EmptyConverterProvidersValidBean.class); + + EmptyConverterProvidersValidBean converterBean = new EmptyConverterProvidersValidBean(); + converterBean.setId("id-value"); + converterBean.setIntegerAttribute(123); + + Map itemMap = beanTableSchema.itemToMap(converterBean, false); + + assertThat(itemMap.size(), is(2)); + assertThat(itemMap, hasEntry("id", 
stringValue("id-value-custom"))); + assertThat(itemMap, hasEntry("integerAttribute", numberValue(133))); + + EmptyConverterProvidersValidBean reverse = beanTableSchema.mapToItem(itemMap); + assertThat(reverse.getId(), is(equalTo("id-value-custom"))); + assertThat(reverse.getIntegerAttribute(), is(equalTo(133))); } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java index 7eb8cea289d6..ad9503bc0344 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticAttributeTest.java @@ -90,6 +90,7 @@ public void build_maximal() { .getter(TEST_GETTER) .setter(TEST_SETTER) .tags(mockTag) + .attributeConverter(attributeConverter) .build(); assertThat(staticAttribute.name()).isEqualTo("test-attribute"); @@ -97,6 +98,7 @@ public void build_maximal() { assertThat(staticAttribute.setter()).isSameAs(TEST_SETTER); assertThat(staticAttribute.tags()).containsExactly(mockTag); assertThat(staticAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + assertThat(staticAttribute.attributeConverter()).isSameAs(attributeConverter); } @Test @@ -151,6 +153,7 @@ public void toBuilder() { .getter(TEST_GETTER) .setter(TEST_SETTER) .tags(mockTag, mockTag2) + .attributeConverter(attributeConverter) .build(); StaticAttribute clonedAttribute = staticAttribute.toBuilder().build(); @@ -160,6 +163,7 @@ public void toBuilder() { assertThat(clonedAttribute.setter()).isSameAs(TEST_SETTER); assertThat(clonedAttribute.tags()).containsExactly(mockTag, mockTag2); assertThat(clonedAttribute.type()).isEqualTo(EnhancedType.of(String.class)); + assertThat(clonedAttribute.attributeConverter()).isSameAs(attributeConverter); } 
@Test diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java index c5789aad3029..7ef020a15d52 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/StaticTableSchemaTest.java @@ -780,10 +780,16 @@ public Consumer modifyMetadata() { } @Mock - private AttributeConverterProvider provider; + private AttributeConverterProvider provider1; @Mock - private AttributeConverter attributeConverter; + private AttributeConverterProvider provider2; + + @Mock + private AttributeConverter attributeConverter1; + + @Mock + private AttributeConverter attributeConverter2; @Rule public ExpectedException exception = ExpectedException.none(); @@ -1366,8 +1372,8 @@ public void instantiateFlattenedAbstractClassShouldThrowException() { } @Test - public void addAttributeConverterProvider() { - when(provider.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter); + public void addSingleAttributeConverterProvider() { + when(provider1.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter1); StaticTableSchema tableSchema = StaticTableSchema.builder(FakeMappedItem.class) @@ -1375,10 +1381,10 @@ public void addAttributeConverterProvider() { .addAttribute(String.class, a -> a.name("aString") .getter(FakeMappedItem::getAString) .setter(FakeMappedItem::setAString)) - .attributeConverterProvider(provider) + .attributeConverterProviders(provider1) .build(); - assertThat(tableSchema.attributeConverterProvider(), is(provider)); + assertThat(tableSchema.attributeConverterProvider(), is(provider1)); } @Test @@ -1386,8 +1392,8 @@ public void usesCustomAttributeConverterProvider() { 
String originalString = "test-string"; String expectedString = "test-string-custom"; - when(provider.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter); - when(attributeConverter.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + when(provider1.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter1); + when(attributeConverter1.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); StaticTableSchema tableSchema = StaticTableSchema.builder(FakeMappedItem.class) @@ -1395,11 +1401,69 @@ public void usesCustomAttributeConverterProvider() { .addAttribute(String.class, a -> a.name("aString") .getter(FakeMappedItem::getAString) .setter(FakeMappedItem::setAString)) - .attributeConverterProvider(provider) + .attributeConverterProviders(provider1) .build(); + Map resultMap = + tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), false); + assertThat(resultMap.get("aString").s(), is(expectedString)); + } + + @Test + public void usesCustomAttributeConverterProviders() { + String originalString = "test-string"; + String expectedString = "test-string-custom"; + + when(provider2.converterFor(EnhancedType.of(String.class))).thenReturn(attributeConverter2); + when(attributeConverter2.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(provider1, provider2) + .build(); + + Map resultMap = + tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), false); + assertThat(resultMap.get("aString").s(), is(expectedString)); + } + + @Test + public void 
noConverterProvider_throwsException_whenMissingAttributeConverters() { + exception.expect(NullPointerException.class); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString)) + .attributeConverterProviders(Collections.emptyList()) + .build(); + } + + @Test + public void noConverterProvider_handlesCorrectly_whenAttributeConvertersAreSupplied() { + String originalString = "test-string"; + String expectedString = "test-string-custom"; + + when(attributeConverter1.transformFrom(any())).thenReturn(AttributeValue.builder().s(expectedString).build()); + + StaticTableSchema tableSchema = + StaticTableSchema.builder(FakeMappedItem.class) + .newItemSupplier(FakeMappedItem::new) + .addAttribute(String.class, a -> a.name("aString") + .getter(FakeMappedItem::getAString) + .setter(FakeMappedItem::setAString) + .attributeConverter(attributeConverter1)) + .attributeConverterProviders(Collections.emptyList()) + .build(); + Map resultMap = tableSchema.itemToMap(FakeMappedItem.builder().aString(originalString).build(), - false); + false); assertThat(resultMap.get("aString").s(), is(expectedString)); } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java index 62e29b3c7616..3f27afd6173f 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/AttributeConverterBean.java @@ -61,12 +61,13 @@ public boolean equals(Object o) { if (o == null || getClass() != 
o.getClass()) return false; AttributeConverterBean that = (AttributeConverterBean) o; return Objects.equals(id, that.id) && - Objects.equals(integerAttribute, that.integerAttribute); + Objects.equals(integerAttribute, that.integerAttribute) && + Objects.equals(attributeItem, that.attributeItem); } @Override public int hashCode() { - return Objects.hash(id, integerAttribute); + return Objects.hash(id, integerAttribute, attributeItem); } public static class CustomAttributeConverter implements AttributeConverter { @@ -76,12 +77,12 @@ public CustomAttributeConverter() { @Override public AttributeValue transformFrom(AttributeItem input) { - return EnhancedAttributeValue.fromString(input.innerValue).toAttributeValue(); + return EnhancedAttributeValue.fromString(input.getInnerValue()).toAttributeValue(); } @Override public AttributeItem transformTo(AttributeValue input) { - return null; + return new AttributeItem(input.s()); } @Override @@ -96,7 +97,14 @@ public AttributeValueType attributeValueType() { } public static class AttributeItem { - String innerValue; + private String innerValue; + + public AttributeItem() { + } + + AttributeItem(String value) { + innerValue = value; + } public String getInnerValue() { return innerValue; @@ -105,5 +113,18 @@ public String getInnerValue() { public void setInnerValue(String innerValue) { this.innerValue = innerValue; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AttributeItem that = (AttributeItem) o; + return Objects.equals(innerValue, that.innerValue); + } + + @Override + public int hashCode() { + return Objects.hash(innerValue); + } } } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java new file mode 100644 index 000000000000..60c2d2d9fc9c --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersInvalidBean.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean(converterProviders = {}) +public class EmptyConverterProvidersInvalidBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + @DynamoDbConvertedBy(CustomStringAttributeConverter.class) + public String getId() { + return this.id; + } + 
public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EmptyConverterProvidersInvalidBean that = (EmptyConverterProvidersInvalidBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class CustomStringAttributeConverter implements AttributeConverter { + final static String DEFAULT_SUFFIX = "-custom"; + + public CustomStringAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java new file mode 100644 index 000000000000..ceee30289a91 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/EmptyConverterProvidersValidBean.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbConvertedBy; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; + +@DynamoDbBean(converterProviders = {}) +public class EmptyConverterProvidersValidBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + @DynamoDbConvertedBy(CustomStringAttributeConverter.class) + public String getId() { + return this.id; + } + public void setId(String id) { + this.id = id; + } + + @DynamoDbConvertedBy(CustomIntegerAttributeConverter.class) + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + 
EmptyConverterProvidersValidBean that = (EmptyConverterProvidersValidBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class CustomStringAttributeConverter implements AttributeConverter { + final static String DEFAULT_SUFFIX = "-custom"; + + public CustomStringAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + public static class CustomIntegerAttributeConverter implements AttributeConverter { + final static Integer DEFAULT_INCREMENT = 10; + + public CustomIntegerAttributeConverter() { + } + + @Override + public AttributeValue transformFrom(Integer input) { + return EnhancedAttributeValue.fromNumber(IntegerStringConverter.create().toString(input + DEFAULT_INCREMENT)) + .toAttributeValue(); + } + + @Override + public Integer transformTo(AttributeValue input) { + return Integer.valueOf(input.n()); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java new file mode 100644 index 000000000000..b3a3578c299f --- /dev/null +++ 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/MultipleConverterProvidersBean.java @@ -0,0 +1,139 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.enhanced.dynamodb.mapper.testbeans; + +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter; +import software.amazon.awssdk.enhanced.dynamodb.AttributeConverterProvider; +import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType; +import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue; +import software.amazon.awssdk.enhanced.dynamodb.internal.converter.string.IntegerStringConverter; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; +import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbPartitionKey; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.utils.ImmutableMap; + +@DynamoDbBean(converterProviders = { + MultipleConverterProvidersBean.FirstAttributeConverterProvider.class, + MultipleConverterProvidersBean.SecondAttributeConverterProvider.class}) +public class MultipleConverterProvidersBean { + private String id; + private Integer integerAttribute; + + @DynamoDbPartitionKey + public String getId() { + return 
this.id; + } + public void setId(String id) { + this.id = id; + } + + public Integer getIntegerAttribute() { + return integerAttribute; + } + public void setIntegerAttribute(Integer integerAttribute) { + this.integerAttribute = integerAttribute; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MultipleConverterProvidersBean that = (MultipleConverterProvidersBean) o; + return Objects.equals(id, that.id) && + Objects.equals(integerAttribute, that.integerAttribute); + } + + @Override + public int hashCode() { + return Objects.hash(id, integerAttribute); + } + + public static class FirstAttributeConverterProvider implements AttributeConverterProvider { + @SuppressWarnings("unchecked") + @Override + public AttributeConverter converterFor(EnhancedType enhancedType) { + return null; + } + } + + public static class SecondAttributeConverterProvider implements AttributeConverterProvider { + + private final Map, AttributeConverter> converterCache = ImmutableMap.of( + EnhancedType.of(String.class), new CustomStringAttributeConverter(), + EnhancedType.of(Integer.class), new CustomIntegerAttributeConverter() + ); + + @SuppressWarnings("unchecked") + @Override + public AttributeConverter converterFor(EnhancedType enhancedType) { + return (AttributeConverter) converterCache.get(enhancedType); + } + } + + private static class CustomStringAttributeConverter implements AttributeConverter { + + final static String DEFAULT_SUFFIX = "-custom"; + + @Override + public AttributeValue transformFrom(String input) { + return EnhancedAttributeValue.fromString(input + DEFAULT_SUFFIX).toAttributeValue(); + } + + @Override + public String transformTo(AttributeValue input) { + return input.s(); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(String.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.S; + } + } + + private 
static class CustomIntegerAttributeConverter implements AttributeConverter { + + final static Integer DEFAULT_INCREMENT = 10; + + @Override + public AttributeValue transformFrom(Integer input) { + return EnhancedAttributeValue.fromNumber(IntegerStringConverter.create().toString(input + DEFAULT_INCREMENT)) + .toAttributeValue(); + } + + @Override + public Integer transformTo(AttributeValue input) { + return Integer.valueOf(input.n()); + } + + @Override + public EnhancedType type() { + return EnhancedType.of(Integer.class); + } + + @Override + public AttributeValueType attributeValueType() { + return AttributeValueType.N; + } + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterNoConstructorBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java similarity index 87% rename from services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterNoConstructorBean.java rename to services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java index 3d849d9400b0..72212fe4a2d5 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterNoConstructorBean.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/NoConstructorConverterProvidersBean.java @@ -20,8 +20,8 @@ import software.amazon.awssdk.enhanced.dynamodb.EnhancedType; import software.amazon.awssdk.enhanced.dynamodb.mapper.annotations.DynamoDbBean; -@DynamoDbBean(converterProviders = ConverterNoConstructorBean.CustomAttributeConverterProvider.class) -public class ConverterNoConstructorBean extends AbstractBean { +@DynamoDbBean(converterProviders = 
NoConstructorConverterProvidersBean.CustomAttributeConverterProvider.class) +public class NoConstructorConverterProvidersBean extends AbstractBean { public static class CustomAttributeConverterProvider implements AttributeConverterProvider { diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterBean.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java similarity index 95% rename from services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterBean.java rename to services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java index 6500ad16d2f4..5e715b297e63 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/ConverterBean.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mapper/testbeans/SingleConverterProvidersBean.java @@ -28,8 +28,8 @@ import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.utils.ImmutableMap; -@DynamoDbBean(converterProviders = ConverterBean.CustomAttributeConverterProvider.class) -public class ConverterBean { +@DynamoDbBean(converterProviders = SingleConverterProvidersBean.CustomAttributeConverterProvider.class) +public class SingleConverterProvidersBean { private String id; private Integer integerAttribute; @@ -52,7 +52,7 @@ public void setIntegerAttribute(Integer integerAttribute) { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - ConverterBean that = (ConverterBean) o; + SingleConverterProvidersBean that = (SingleConverterProvidersBean) o; return Objects.equals(id, that.id) && Objects.equals(integerAttribute, 
that.integerAttribute); } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java index b61b94c604bb..c3893f46426a 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/AsyncBatchGetItemTest.java @@ -55,6 +55,7 @@ public void setup() { .region(Region.US_WEST_2) .credentialsProvider(() -> AwsBasicCredentials.create("foo", "bar")) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(false) .build(); enhancedClient = DynamoDbEnhancedAsyncClient.builder() .dynamoDbClient(dynamoDbClient) diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java index 2c4c120dbbe6..7c455236da94 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/mocktests/BatchGetItemTest.java @@ -56,6 +56,7 @@ public void setup() { .region(Region.US_WEST_2) .credentialsProvider(() -> AwsBasicCredentials.create("foo", "bar")) .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(false) .build(); enhancedClient = DynamoDbEnhancedClient.builder() .dynamoDbClient(dynamoDbClient) diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java 
b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java index 24b1bc710e16..2b146cf3f194 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/QueryEnhancedRequestTest.java @@ -23,7 +23,10 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; import static software.amazon.awssdk.enhanced.dynamodb.model.QueryConditional.keyEqualTo; +import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.junit.Test; import org.junit.runner.RunWith; @@ -44,6 +47,7 @@ public void builder_minimal() { assertThat(builtObject.limit(), is(nullValue())); assertThat(builtObject.queryConditional(), is(nullValue())); assertThat(builtObject.scanIndexForward(), is(nullValue())); + assertThat(builtObject.attributesToProject(), is(nullValue())); } @Test @@ -60,6 +64,11 @@ public void builder_maximal() { QueryConditional queryConditional = keyEqualTo(k -> k.partitionValue("id-value")); + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + List attributesToProject = new ArrayList<>(Arrays.asList(attributesToProjectArray)); + attributesToProject.add(additionalElement); + QueryEnhancedRequest builtObject = QueryEnhancedRequest.builder() .exclusiveStartKey(exclusiveStartKey) .consistentRead(false) @@ -67,6 +76,8 @@ public void builder_maximal() { .limit(3) .queryConditional(queryConditional) .scanIndexForward(true) + .attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) .build(); assertThat(builtObject.exclusiveStartKey(), is(exclusiveStartKey)); @@ -75,6 +86,7 @@ public void builder_maximal() { assertThat(builtObject.limit(), is(3)); 
assertThat(builtObject.queryConditional(), is(queryConditional)); assertThat(builtObject.scanIndexForward(), is(true)); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); } @Test diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java index 862e5f7ec8e5..ce2a86f443c6 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/ScanEnhancedRequestTest.java @@ -22,7 +22,10 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.numberValue; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.junit.Test; import org.junit.runner.RunWith; @@ -40,6 +43,7 @@ public void builder_minimal() { assertThat(builtObject.exclusiveStartKey(), is(nullValue())); assertThat(builtObject.consistentRead(), is(nullValue())); assertThat(builtObject.filterExpression(), is(nullValue())); + assertThat(builtObject.attributesToProject(), is(nullValue())); assertThat(builtObject.limit(), is(nullValue())); } @@ -55,16 +59,24 @@ public void builder_maximal() { .expressionValues(expressionValues) .build(); + String[] attributesToProjectArray = {"one", "two"}; + String additionalElement = "three"; + List attributesToProject = new ArrayList<>(Arrays.asList(attributesToProjectArray)); + attributesToProject.add(additionalElement); + ScanEnhancedRequest builtObject = ScanEnhancedRequest.builder() .exclusiveStartKey(exclusiveStartKey) .consistentRead(false) .filterExpression(filterExpression) + 
.attributesToProject(attributesToProjectArray) + .addAttributeToProject(additionalElement) .limit(3) .build(); assertThat(builtObject.exclusiveStartKey(), is(exclusiveStartKey)); assertThat(builtObject.consistentRead(), is(false)); assertThat(builtObject.filterExpression(), is(filterExpression)); + assertThat(builtObject.attributesToProject(), is(attributesToProject)); assertThat(builtObject.limit(), is(3)); } diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java index 158d3b879b6c..c17becfe54bd 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/model/TransactWriteItemsEnhancedRequestTest.java @@ -20,27 +20,33 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertEquals; import static software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem.createUniqueFakeItem; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.UUID; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClient; +import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; import software.amazon.awssdk.enhanced.dynamodb.Expression; import 
software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItem; +import software.amazon.awssdk.enhanced.dynamodb.internal.client.ExtensionResolver; +import software.amazon.awssdk.enhanced.dynamodb.internal.operations.TransactWriteItemsOperation; import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.Delete; import software.amazon.awssdk.services.dynamodb.model.Put; import software.amazon.awssdk.services.dynamodb.model.TransactWriteItem; +import software.amazon.awssdk.services.dynamodb.model.TransactWriteItemsRequest; @RunWith(MockitoJUnitRunner.class) public class TransactWriteItemsEnhancedRequestTest { @@ -167,6 +173,17 @@ public void builder_maximal_builder_style() { assertThat(builtObject.transactWriteItems().get(3).conditionCheck().key().get("id").s(), is(fakeItem.getId())); } + @Test + public void builder_passRequestToken_shouldWork() { + String token = UUID.randomUUID().toString(); + TransactWriteItemsEnhancedRequest enhancedRequest = TransactWriteItemsEnhancedRequest.builder() + .clientRequestToken(token) + .build(); + DynamoDbEnhancedClientExtension extension = ExtensionResolver.resolveExtensions(ExtensionResolver.defaultExtensions()); + TransactWriteItemsRequest request = TransactWriteItemsOperation.create(enhancedRequest).generateRequest(extension); + assertEquals(token, request.clientRequestToken()); + } + private List getTransactWriteItems(FakeItem fakeItem) { final Map fakeItemMap = FakeItem.getTableSchema().itemToMap(fakeItem, true); diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 9344a9be3836..3925604550ab 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git 
a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index a0974ef0ab80..76b3d4e6f8f7 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json index cd8928f5cf38..51f6803af99d 100644 --- a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json +++ b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json @@ -404,7 +404,7 @@ }, "sharedVia":{ "shape":"SharedViaList", - "documentation":"

Indicates how the access that generated the finding is granted.

" + "documentation":"

Indicates how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

" }, "status":{ "shape":"FindingStatus", @@ -780,6 +780,10 @@ "shape":"ResourceType", "documentation":"

The type of the resource reported in the finding.

" }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

" + }, "status":{ "shape":"FindingStatus", "documentation":"

The current status of the finding.

" @@ -796,6 +800,43 @@ "type":"list", "member":{"shape":"FindingId"} }, + "FindingSource":{ + "type":"structure", + "required":["type"], + "members":{ + "detail":{ + "shape":"FindingSourceDetail", + "documentation":"

Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

" + }, + "type":{ + "shape":"FindingSourceType", + "documentation":"

Indicates the type of access that generated the finding.

" + } + }, + "documentation":"

The source of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

" + }, + "FindingSourceDetail":{ + "type":"structure", + "members":{ + "accessPointArn":{ + "shape":"String", + "documentation":"

The ARN of the access point that generated the finding.

" + } + }, + "documentation":"

Includes details about how the access that generated the finding is granted. This is populated for Amazon S3 bucket findings.

" + }, + "FindingSourceList":{ + "type":"list", + "member":{"shape":"FindingSource"} + }, + "FindingSourceType":{ + "type":"string", + "enum":[ + "BUCKET_ACL", + "POLICY", + "S3_ACCESS_POINT" + ] + }, "FindingStatus":{ "type":"string", "enum":[ @@ -868,6 +909,10 @@ "shape":"ResourceType", "documentation":"

The type of the resource that the external principal has access to.

" }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

The sources of the finding. This indicates how the access that generated the finding is granted. It is populated for Amazon S3 bucket findings.

" + }, "status":{ "shape":"FindingStatus", "documentation":"

The status of the finding.

" diff --git a/services/acm/pom.xml b/services/acm/pom.xml index c69f3b34f6c9..31ece6e63aff 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 50c426108288..24370c67c940 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index 1d25e747ebf0..2dda682dce21 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json b/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json index 868cf9aa6799..700df59a1ee4 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/service-2.json @@ -874,6 +874,7 @@ "errors":[ {"shape":"LimitExceededException"}, {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, {"shape":"InvalidDeviceException"} ], "documentation":"

Registers an Alexa-enabled device built by an Original Equipment Manufacturer (OEM) using Alexa Voice Service (AVS).

" @@ -1516,6 +1517,7 @@ }, "BusinessReportContentRange":{ "type":"structure", + "required":["Interval"], "members":{ "Interval":{ "shape":"BusinessReportInterval", @@ -1917,6 +1919,10 @@ "shape":"ClientRequestToken", "documentation":"

The client request token.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the business report schedule.

" } } }, @@ -2227,6 +2233,10 @@ "MeetingRoomConfiguration":{ "shape":"CreateMeetingRoomConfiguration", "documentation":"

The meeting room settings of a room profile.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the profile.

" } } }, @@ -2271,7 +2281,7 @@ }, "ProfileArn":{ "shape":"Arn", - "documentation":"

The profile ARN for the room.

" + "documentation":"

The profile ARN for the room. This is required.

" }, "ProviderCalendarId":{ "shape":"ProviderCalendarId", @@ -2313,6 +2323,10 @@ "shape":"ClientRequestToken", "documentation":"

A unique, user-specified identifier for this request that ensures idempotency.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the skill group.

" } } }, @@ -2726,7 +2740,7 @@ "documentation":"

The room ARN associated with a device.

" }, "RoomName":{ - "shape":"RoomName", + "shape":"DeviceRoomName", "documentation":"

The name of the room associated with a device.

" }, "DeviceStatusInfo":{ @@ -2813,6 +2827,12 @@ "documentation":"

The request failed because this device is no longer registered and therefore no longer managed by this account.

", "exception":true }, + "DeviceRoomName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u0085\\u00A0-\\uD7FF\\uE000-\\uFFFD\\u10000-\\u10FFFF]*" + }, "DeviceSerialNumber":{ "type":"string", "pattern":"[a-zA-Z0-9]{1,200}" @@ -2862,7 +2882,9 @@ "INVALID_CERTIFICATE_AUTHORITY", "NETWORK_PROFILE_NOT_FOUND", "INVALID_PASSWORD_STATE", - "PASSWORD_NOT_FOUND" + "PASSWORD_NOT_FOUND", + "PASSWORD_MANAGER_ACCESS_DENIED", + "CERTIFICATE_AUTHORITY_ACCESS_DENIED" ] }, "DeviceStatusDetails":{ @@ -4401,7 +4423,6 @@ "ClientId", "UserCode", "ProductId", - "DeviceSerialNumber", "AmazonId" ], "members":{ @@ -4424,6 +4445,10 @@ "AmazonId":{ "shape":"AmazonId", "documentation":"

The device type ID for your AVS device generated by Amazon when the OEM creates a new product on Amazon's Developer Console.

" + }, + "RoomArn":{ + "shape":"Arn", + "documentation":"

The ARN of the room with which to associate your AVS device.

" } } }, @@ -5098,7 +5123,7 @@ }, "Reviews":{ "shape":"Reviews", - "documentation":"

The list of reviews for the skill, including Key and Value pair.

" + "documentation":"

This member has been deprecated.

The list of reviews for the skill, including Key and Value pair.

" }, "DeveloperInfo":{ "shape":"DeveloperInfo", diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 1b462f073271..1f82005ca6b1 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplify/src/main/resources/codegen-resources/service-2.json b/services/amplify/src/main/resources/codegen-resources/service-2.json index 076a7dd80c20..cab8cb0b5571 100644 --- a/services/amplify/src/main/resources/codegen-resources/service-2.json +++ b/services/amplify/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Creates a new Amplify App.

" + "documentation":"

Creates a new Amplify app.

" }, "CreateBackendEnvironment":{ "name":"CreateBackendEnvironment", @@ -45,7 +45,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a new backend environment for an Amplify App.

" + "documentation":"

Creates a new backend environment for an Amplify app.

" }, "CreateBranch":{ "name":"CreateBranch", @@ -63,7 +63,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Creates a new Branch for an Amplify App.

" + "documentation":"

Creates a new branch for an Amplify app.

" }, "CreateDeployment":{ "name":"CreateDeployment", @@ -79,7 +79,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Create a deployment for manual deploy apps. (Apps are not connected to repository)

" + "documentation":"

Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a repository.

" }, "CreateDomainAssociation":{ "name":"CreateDomainAssociation", @@ -97,7 +97,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Create a new DomainAssociation on an App

" + "documentation":"

Creates a new domain association for an Amplify app. This action associates a custom domain with the Amplify app

" }, "CreateWebhook":{ "name":"CreateWebhook", @@ -115,7 +115,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Create a new webhook on an App.

" + "documentation":"

Creates a new webhook on an Amplify app.

" }, "DeleteApp":{ "name":"DeleteApp", @@ -132,7 +132,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Delete an existing Amplify App by appId.

" + "documentation":"

Deletes an existing Amplify app specified by an app ID.

" }, "DeleteBackendEnvironment":{ "name":"DeleteBackendEnvironment", @@ -149,7 +149,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Delete backend environment for an Amplify App.

" + "documentation":"

Deletes a backend environment for an Amplify app.

" }, "DeleteBranch":{ "name":"DeleteBranch", @@ -166,7 +166,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Deletes a branch for an Amplify App.

" + "documentation":"

Deletes a branch for an Amplify app.

" }, "DeleteDomainAssociation":{ "name":"DeleteDomainAssociation", @@ -183,7 +183,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Deletes a DomainAssociation.

" + "documentation":"

Deletes a domain association for an Amplify app.

" }, "DeleteJob":{ "name":"DeleteJob", @@ -200,7 +200,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Delete a job, for an Amplify branch, part of Amplify App.

" + "documentation":"

Deletes a job for a branch of an Amplify app.

" }, "DeleteWebhook":{ "name":"DeleteWebhook", @@ -233,7 +233,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieve website access logs for a specific time range via a pre-signed URL.

" + "documentation":"

Returns the website access logs for a specific time range using a presigned URL.

" }, "GetApp":{ "name":"GetApp", @@ -249,7 +249,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieves an existing Amplify App by appId.

" + "documentation":"

Returns an existing Amplify app by appID.

" }, "GetArtifactUrl":{ "name":"GetArtifactUrl", @@ -266,7 +266,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Retrieves artifact info that corresponds to a artifactId.

" + "documentation":"

Returns the artifact info that corresponds to an artifact id.

" }, "GetBackendEnvironment":{ "name":"GetBackendEnvironment", @@ -282,7 +282,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieves a backend environment for an Amplify App.

" + "documentation":"

Returns a backend environment for an Amplify app.

" }, "GetBranch":{ "name":"GetBranch", @@ -298,7 +298,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieves a branch for an Amplify App.

" + "documentation":"

Returns a branch for an Amplify app.

" }, "GetDomainAssociation":{ "name":"GetDomainAssociation", @@ -314,7 +314,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieves domain info that corresponds to an appId and domainName.

" + "documentation":"

Returns the domain information for an Amplify app.

" }, "GetJob":{ "name":"GetJob", @@ -331,7 +331,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Get a job for a branch, part of an Amplify App.

" + "documentation":"

Returns a job for a branch of an Amplify app.

" }, "GetWebhook":{ "name":"GetWebhook", @@ -348,7 +348,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Retrieves webhook info that corresponds to a webhookId.

" + "documentation":"

Returns the webhook information that corresponds to a specified webhook ID.

" }, "ListApps":{ "name":"ListApps", @@ -363,7 +363,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists existing Amplify Apps.

" + "documentation":"

Returns a list of the existing Amplify apps.

" }, "ListArtifacts":{ "name":"ListArtifacts", @@ -379,7 +379,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

List artifacts with an app, a branch, a job and an artifact type.

" + "documentation":"

Returns a list of artifacts for a specified app, branch, and job.

" }, "ListBackendEnvironments":{ "name":"ListBackendEnvironments", @@ -394,7 +394,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists backend environments for an Amplify App.

" + "documentation":"

Lists the backend environments for an Amplify app.

" }, "ListBranches":{ "name":"ListBranches", @@ -409,7 +409,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Lists branches for an Amplify App.

" + "documentation":"

Lists the branches of an Amplify app.

" }, "ListDomainAssociations":{ "name":"ListDomainAssociations", @@ -424,7 +424,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

List domains with an app

" + "documentation":"

Returns the domain associations for an Amplify app.

" }, "ListJobs":{ "name":"ListJobs", @@ -440,7 +440,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

List Jobs for a branch, part of an Amplify App.

" + "documentation":"

Lists the jobs for a branch of an Amplify app.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -455,7 +455,7 @@ {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

List tags for resource.

" + "documentation":"

Returns a list of tags for a specified Amazon Resource Name (ARN).

" }, "ListWebhooks":{ "name":"ListWebhooks", @@ -471,7 +471,7 @@ {"shape":"InternalFailureException"}, {"shape":"LimitExceededException"} ], - "documentation":"

List webhooks with an app.

" + "documentation":"

Returns a list of webhooks for an Amplify app.

" }, "StartDeployment":{ "name":"StartDeployment", @@ -488,7 +488,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Start a deployment for manual deploy apps. (Apps are not connected to repository)

" + "documentation":"

Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a repository.

" }, "StartJob":{ "name":"StartJob", @@ -505,7 +505,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Starts a new job for a branch, part of an Amplify App.

" + "documentation":"

Starts a new job for a branch of an Amplify app.

" }, "StopJob":{ "name":"StopJob", @@ -522,7 +522,7 @@ {"shape":"NotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Stop a job that is in progress, for an Amplify branch, part of Amplify App.

" + "documentation":"

Stops a job that is in progress for a branch of an Amplify app.

" }, "TagResource":{ "name":"TagResource", @@ -537,7 +537,7 @@ {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Tag resource with tag key and value.

" + "documentation":"

Tags the resource with a tag key and value.

" }, "UntagResource":{ "name":"UntagResource", @@ -552,7 +552,7 @@ {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Untag resource with resourceArn.

" + "documentation":"

Untags a resource with a specified Amazon Resource Name (ARN).

" }, "UpdateApp":{ "name":"UpdateApp", @@ -568,7 +568,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Updates an existing Amplify App.

" + "documentation":"

Updates an existing Amplify app.

" }, "UpdateBranch":{ "name":"UpdateBranch", @@ -585,7 +585,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Updates a branch for an Amplify App.

" + "documentation":"

Updates a branch for an Amplify app.

" }, "UpdateDomainAssociation":{ "name":"UpdateDomainAssociation", @@ -602,7 +602,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Create a new DomainAssociation on an App

" + "documentation":"

Creates a new domain association for an Amplify app.

" }, "UpdateWebhook":{ "name":"UpdateWebhook", @@ -619,14 +619,15 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Update a webhook.

" + "documentation":"

Updates a webhook.

" } }, "shapes":{ "AccessToken":{ "type":"string", "max":255, - "min":1 + "min":1, + "sensitive":true }, "ActiveJobId":{ "type":"string", @@ -651,90 +652,94 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for the Amplify App.

" + "documentation":"

The unique ID of the Amplify app.

" }, "appArn":{ "shape":"AppArn", - "documentation":"

ARN for the Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amplify app.

" }, "name":{ "shape":"Name", - "documentation":"

Name for the Amplify App.

" + "documentation":"

The name for the Amplify app.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Tag for Amplify App.

" + "documentation":"

The tag for the Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for the Amplify App.

" + "documentation":"

The description for the Amplify app.

" }, "repository":{ "shape":"Repository", - "documentation":"

Repository for the Amplify App.

" + "documentation":"

The repository for the Amplify app.

" }, "platform":{ "shape":"Platform", - "documentation":"

Platform for the Amplify App.

" + "documentation":"

The platform for the Amplify app.

" }, "createTime":{ "shape":"CreateTime", - "documentation":"

Create date / time for the Amplify App.

" + "documentation":"

The creation date and time for the Amplify app.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Update date / time for the Amplify App.

" + "documentation":"

The date and time of the last update for the Amplify app.

" }, "iamServiceRoleArn":{ "shape":"ServiceRoleArn", - "documentation":"

IAM service role ARN for the Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) service role for the Amplify app.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables for the Amplify App.

" + "documentation":"

The environment variables for the Amplify app.

" }, "defaultDomain":{ "shape":"DefaultDomain", - "documentation":"

Default domain for the Amplify App.

" + "documentation":"

The default domain for the Amplify app.

" }, "enableBranchAutoBuild":{ "shape":"EnableBranchAutoBuild", - "documentation":"

Enables auto-building of branches for the Amplify App.

" + "documentation":"

Enables the auto-building of branches for the Amplify app.

" + }, + "enableBranchAutoDeletion":{ + "shape":"EnableBranchAutoDeletion", + "documentation":"

Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Authorization for branches for the Amplify App.

" + "documentation":"

Enables basic authorization for the Amplify app's branches.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for branches for the Amplify App.

" + "documentation":"

The basic authorization credentials for branches for the Amplify app.

" }, "customRules":{ "shape":"CustomRules", - "documentation":"

Custom redirect / rewrite rules for the Amplify App.

" + "documentation":"

Describes the custom redirect and rewrite rules for the Amplify app.

" }, "productionBranch":{ "shape":"ProductionBranch", - "documentation":"

Structure with Production Branch information.

" + "documentation":"

Describes the information about a production branch of the Amplify app.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec content for Amplify App.

" + "documentation":"

Describes the content of the build specification (build spec) for the Amplify app.

" }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", - "documentation":"

Enables automated branch creation for the Amplify App.

" + "documentation":"

Enables automated branch creation for the Amplify app.

" }, "autoBranchCreationPatterns":{ "shape":"AutoBranchCreationPatterns", - "documentation":"

Automated branch creation glob patterns for the Amplify App.

" + "documentation":"

Describes the automated branch creation glob patterns for the Amplify app.

" }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

Automated branch creation config for the Amplify App.

" + "documentation":"

Describes the automated branch creation configuration for the Amplify app.

" } }, - "documentation":"

Amplify App represents different branches of a repository for building, deploying, and hosting.

" + "documentation":"

Represents the different branches of a repository for building, deploying, and hosting an Amplify app.

" }, "AppArn":{ "type":"string", @@ -758,14 +763,14 @@ "members":{ "artifactFileName":{ "shape":"ArtifactFileName", - "documentation":"

File name for the artifact.

" + "documentation":"

The file name for the artifact.

" }, "artifactId":{ "shape":"ArtifactId", - "documentation":"

Unique Id for a artifact.

" + "documentation":"

The unique ID for the artifact.

" } }, - "documentation":"

Structure for artifact.

" + "documentation":"

Describes an artifact.

" }, "ArtifactFileName":{ "type":"string", @@ -801,42 +806,42 @@ "members":{ "stage":{ "shape":"Stage", - "documentation":"

Stage for the auto created branch.

" + "documentation":"

Describes the current stage for the autocreated branch.

" }, "framework":{ "shape":"Framework", - "documentation":"

Framework for the auto created branch.

" + "documentation":"

The framework for the autocreated branch.

" }, "enableAutoBuild":{ "shape":"EnableAutoBuild", - "documentation":"

Enables auto building for the auto created branch.

" + "documentation":"

Enables auto building for the autocreated branch.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables for the auto created branch.

" + "documentation":"

The environment variables for the autocreated branch.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for the auto created branch.

" + "documentation":"

The basic authorization credentials for the autocreated branch.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Auth for the auto created branch.

" + "documentation":"

Enables basic authorization for the autocreated branch.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec for the auto created branch.

" + "documentation":"

The build specification (build spec) for the autocreated branch.

" }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables Pull Request Preview for auto created branch.

" + "documentation":"

Enables pull request preview for the autocreated branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", - "documentation":"

The Amplify Environment name for the pull request.

" + "documentation":"

The Amplify environment name for the pull request.

" } }, - "documentation":"

Structure with auto branch creation config.

" + "documentation":"

Describes the automated branch creation configuration.

" }, "AutoBranchCreationPattern":{ "type":"string", @@ -847,6 +852,20 @@ "type":"list", "member":{"shape":"AutoBranchCreationPattern"} }, + "AutoSubDomainCreationPattern":{ + "type":"string", + "max":2048, + "min":1 + }, + "AutoSubDomainCreationPatterns":{ + "type":"list", + "member":{"shape":"AutoSubDomainCreationPattern"} + }, + "AutoSubDomainIAMRole":{ + "type":"string", + "max":1000, + "pattern":"^$|^arn:aws:iam::\\d{12}:role.+" + }, "BackendEnvironment":{ "type":"structure", "required":[ @@ -858,30 +877,30 @@ "members":{ "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

Arn for a backend environment, part of an Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" }, "environmentName":{ "shape":"EnvironmentName", - "documentation":"

Name for a backend environment, part of an Amplify App.

" + "documentation":"

The name for a backend environment that is part of an Amplify app.

" }, "stackName":{ "shape":"StackName", - "documentation":"

CloudFormation stack name of backend environment.

" + "documentation":"

The AWS CloudFormation stack name of a backend environment.

" }, "deploymentArtifacts":{ "shape":"DeploymentArtifacts", - "documentation":"

Name of deployment artifacts.

" + "documentation":"

The name of deployment artifacts.

" }, "createTime":{ "shape":"CreateTime", - "documentation":"

Creation date and time for a backend environment, part of an Amplify App.

" + "documentation":"

The creation date and time for a backend environment that is part of an Amplify app.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Last updated date and time for a backend environment, part of an Amplify App.

" + "documentation":"

The last updated date and time for a backend environment that is part of an Amplify app.

" } }, - "documentation":"

Backend environment for an Amplify App.

" + "documentation":"

Describes the backend environment for an Amplify app.

" }, "BackendEnvironmentArn":{ "type":"string", @@ -897,13 +916,14 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when a request contains unexpected data.

", + "documentation":"

A request contains unexpected data.

", "error":{"httpStatusCode":400}, "exception":true }, "BasicAuthCredentials":{ "type":"string", - "max":2000 + "max":2000, + "sensitive":true }, "Branch":{ "type":"structure", @@ -929,95 +949,95 @@ "members":{ "branchArn":{ "shape":"BranchArn", - "documentation":"

ARN for a branch, part of an Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) for a branch that is part of an Amplify app.

" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch, part of an Amplify App.

" + "documentation":"

The name for the branch that is part of an Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for a branch, part of an Amplify App.

" + "documentation":"

The description for the branch that is part of an Amplify app.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Tag for branch for Amplify App.

" + "documentation":"

The tag for the branch of an Amplify app.

" }, "stage":{ "shape":"Stage", - "documentation":"

Stage for a branch, part of an Amplify App.

" + "documentation":"

The current stage for the branch that is part of an Amplify app.

" }, "displayName":{ "shape":"DisplayName", - "documentation":"

Display name for a branch, will use as the default domain prefix.

" + "documentation":"

The display name for the branch. This is used as the default domain prefix.

" }, "enableNotification":{ "shape":"EnableNotification", - "documentation":"

Enables notifications for a branch, part of an Amplify App.

" + "documentation":"

Enables notifications for a branch that is part of an Amplify app.

" }, "createTime":{ "shape":"CreateTime", - "documentation":"

Creation date and time for a branch, part of an Amplify App.

" + "documentation":"

The creation date and time for a branch that is part of an Amplify app.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Last updated date and time for a branch, part of an Amplify App.

" + "documentation":"

The last updated date and time for a branch that is part of an Amplify app.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables specific to a branch, part of an Amplify App.

" + "documentation":"

The environment variables specific to a branch of an Amplify app.

" }, "enableAutoBuild":{ "shape":"EnableAutoBuild", - "documentation":"

Enables auto-building on push for a branch, part of an Amplify App.

" + "documentation":"

Enables auto-building on push for a branch of an Amplify app.

" }, "customDomains":{ "shape":"CustomDomains", - "documentation":"

Custom domains for a branch, part of an Amplify App.

" + "documentation":"

The custom domains for a branch of an Amplify app.

" }, "framework":{ "shape":"Framework", - "documentation":"

Framework for a branch, part of an Amplify App.

" + "documentation":"

The framework for a branch of an Amplify app.

" }, "activeJobId":{ "shape":"ActiveJobId", - "documentation":"

Id of the active job for a branch, part of an Amplify App.

" + "documentation":"

The ID of the active job for a branch of an Amplify app.

" }, "totalNumberOfJobs":{ "shape":"TotalNumberOfJobs", - "documentation":"

Total number of Jobs part of an Amplify App.

" + "documentation":"

The total number of jobs that are part of an Amplify app.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Authorization for a branch, part of an Amplify App.

" + "documentation":"

Enables basic authorization for a branch of an Amplify app.

" }, "thumbnailUrl":{ "shape":"ThumbnailUrl", - "documentation":"

Thumbnail URL for the branch.

" + "documentation":"

The thumbnail URL for the branch of an Amplify app.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for a branch, part of an Amplify App.

" + "documentation":"

The basic authorization credentials for a branch of an Amplify app.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec content for branch for Amplify App.

" + "documentation":"

The build specification (build spec) content for the branch of an Amplify app.

" }, "ttl":{ "shape":"TTL", - "documentation":"

The content TTL for the website in seconds.

" + "documentation":"

The content Time to Live (TTL) for the website in seconds.

" }, "associatedResources":{ "shape":"AssociatedResources", - "documentation":"

List of custom resources that are linked to this branch.

" + "documentation":"

A list of custom resources that are linked to this branch.

" }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables Pull Request Preview for this branch.

" + "documentation":"

Enables pull request preview for the branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", - "documentation":"

The Amplify Environment name for the pull request.

" + "documentation":"

The Amplify environment name for the pull request.

" }, "destinationBranch":{ "shape":"BranchName", @@ -1029,10 +1049,10 @@ }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

ARN for a Backend Environment, part of an Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" } }, - "documentation":"

Branch for an Amplify App, which maps to a 3rd party repository branch.

" + "documentation":"

The branch for an Amplify app, which maps to a third-party repository branch.

" }, "BranchArn":{ "type":"string", @@ -1050,7 +1070,7 @@ }, "BuildSpec":{ "type":"string", - "documentation":"

BuildSpec file for Amplify app build.

", + "documentation":"

The build specification (build spec) file for an Amplify app build.

", "max":25000, "min":1 }, @@ -1080,74 +1100,78 @@ "members":{ "name":{ "shape":"Name", - "documentation":"

Name for the Amplify App

" + "documentation":"

The name for the Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for an Amplify App

" + "documentation":"

The description for an Amplify app.

" }, "repository":{ "shape":"Repository", - "documentation":"

Repository for an Amplify App

" + "documentation":"

The repository for an Amplify app.

" }, "platform":{ "shape":"Platform", - "documentation":"

Platform / framework for an Amplify App

" + "documentation":"

The platform or framework for an Amplify app.

" }, "iamServiceRoleArn":{ "shape":"ServiceRoleArn", - "documentation":"

AWS IAM service role for an Amplify App

" + "documentation":"

The AWS Identity and Access Management (IAM) service role for an Amplify app.

" }, "oauthToken":{ "shape":"OauthToken", - "documentation":"

OAuth token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. OAuth token is not stored.

" + "documentation":"

The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key. The OAuth token is not stored.

" }, "accessToken":{ "shape":"AccessToken", - "documentation":"

Personal Access token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. Token is not stored.

" + "documentation":"

The personal access token for a third-party source control system for an Amplify app. The personal access token is used to create a webhook and a read-only deploy key. The token is not stored.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment variables map for an Amplify App.

" + "documentation":"

The environment variables map for an Amplify app.

" }, "enableBranchAutoBuild":{ "shape":"EnableBranchAutoBuild", - "documentation":"

Enable the auto building of branches for an Amplify App.

" + "documentation":"

Enables the auto building of branches for an Amplify app.

" + }, + "enableBranchAutoDeletion":{ + "shape":"EnableBranchAutoDeletion", + "documentation":"

Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enable Basic Authorization for an Amplify App, this will apply to all branches part of this App.

" + "documentation":"

Enables basic authorization for an Amplify app. This will apply to all branches that are part of this app.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Credentials for Basic Authorization for an Amplify App.

" + "documentation":"

The credentials for basic authorization for an Amplify app.

" }, "customRules":{ "shape":"CustomRules", - "documentation":"

Custom rewrite / redirect rules for an Amplify App.

" + "documentation":"

The custom rewrite and redirect rules for an Amplify app.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Tag for an Amplify App

" + "documentation":"

The tag for an Amplify app.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec for an Amplify App

" + "documentation":"

The build specification (build spec) for an Amplify app.

" }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", - "documentation":"

Enables automated branch creation for the Amplify App.

" + "documentation":"

Enables automated branch creation for the Amplify app.

" }, "autoBranchCreationPatterns":{ "shape":"AutoBranchCreationPatterns", - "documentation":"

Automated branch creation glob patterns for the Amplify App.

" + "documentation":"

The automated branch creation glob patterns for the Amplify app.

" }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

Automated branch creation config for the Amplify App.

" + "documentation":"

The automated branch creation configuration for the Amplify app.

" } }, - "documentation":"

Request structure used to create Apps in Amplify.

" + "documentation":"

The request structure used to create apps in Amplify.

" }, "CreateAppResult":{ "type":"structure", @@ -1165,24 +1189,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "environmentName":{ "shape":"EnvironmentName", - "documentation":"

Name for the backend environment.

" + "documentation":"

The name for the backend environment.

" }, "stackName":{ "shape":"StackName", - "documentation":"

CloudFormation stack name of backend environment.

" + "documentation":"

The AWS CloudFormation stack name of a backend environment.

" }, "deploymentArtifacts":{ "shape":"DeploymentArtifacts", - "documentation":"

Name of deployment artifacts.

" + "documentation":"

The name of deployment artifacts.

" } }, - "documentation":"

Request structure for a backend environment create request.

" + "documentation":"

The request structure for the backend environment create request.

" }, "CreateBackendEnvironmentResult":{ "type":"structure", @@ -1190,10 +1214,10 @@ "members":{ "backendEnvironment":{ "shape":"BackendEnvironment", - "documentation":"

Backend environment structure for an amplify App.

" + "documentation":"

Describes the backend environment for an Amplify app.

" } }, - "documentation":"

Result structure for create backend environment.

" + "documentation":"

The result structure for the create backend environment request.

" }, "CreateBranchRequest":{ "type":"structure", @@ -1204,25 +1228,25 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch.

" + "documentation":"

The name for the branch.

" }, "description":{ "shape":"Description", - "documentation":"

Description for the branch.

" + "documentation":"

The description for the branch.

" }, "stage":{ "shape":"Stage", - "documentation":"

Stage for the branch.

" + "documentation":"

Describes the current stage for the branch.

" }, "framework":{ "shape":"Framework", - "documentation":"

Framework for the branch.

" + "documentation":"

The framework for the branch.

" }, "enableNotification":{ "shape":"EnableNotification", @@ -1234,46 +1258,46 @@ }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables for the branch.

" + "documentation":"

The environment variables for the branch.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for the branch.

" + "documentation":"

The basic authorization credentials for the branch.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Auth for the branch.

" + "documentation":"

Enables basic authorization for the branch.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Tag for the branch.

" + "documentation":"

The tag for the branch.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec for the branch.

" + "documentation":"

The build specification (build spec) for the branch.

" }, "ttl":{ "shape":"TTL", - "documentation":"

The content TTL for the website in seconds.

" + "documentation":"

The content Time To Live (TTL) for the website in seconds.

" }, "displayName":{ "shape":"DisplayName", - "documentation":"

Display name for a branch, will use as the default domain prefix.

" + "documentation":"

The display name for a branch. This is used as the default domain prefix.

" }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables Pull Request Preview for this branch.

" + "documentation":"

Enables pull request preview for this branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", - "documentation":"

The Amplify Environment name for the pull request.

" + "documentation":"

The Amplify environment name for the pull request.

" }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

ARN for a Backend Environment, part of an Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" } }, - "documentation":"

Request structure for a branch create request.

" + "documentation":"

The request structure for the create branch request.

" }, "CreateBranchResult":{ "type":"structure", @@ -1281,10 +1305,10 @@ "members":{ "branch":{ "shape":"Branch", - "documentation":"

Branch structure for an Amplify App.

" + "documentation":"

Describes the branch for an Amplify app, which maps to a third-party repository branch.

" } }, - "documentation":"

Result structure for create branch request.

" + "documentation":"

The result structure for the create branch request.

" }, "CreateDeploymentRequest":{ "type":"structure", @@ -1295,22 +1319,22 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The name for the branch, for the job.

", "location":"uri", "locationName":"branchName" }, "fileMap":{ "shape":"FileMap", - "documentation":"

Optional file map that contains file name as the key and file content md5 hash as the value. If this argument is provided, the service will generate different upload url per file. Otherwise, the service will only generate a single upload url for the zipped files.

" + "documentation":"

An optional file map that contains the file name as the key and the file content md5 hash as the value. If this argument is provided, the service will generate a unique upload URL per file. Otherwise, the service will only generate a single upload URL for the zipped files.

" } }, - "documentation":"

Request structure for create a new deployment.

" + "documentation":"

The request structure for the create deployment request.

" }, "CreateDeploymentResult":{ "type":"structure", @@ -1321,18 +1345,18 @@ "members":{ "jobId":{ "shape":"JobId", - "documentation":"

The jobId for this deployment, will supply to start deployment api.

" + "documentation":"

The job ID for this deployment that will be supplied to the start deployment API.

" }, "fileUploadUrls":{ "shape":"FileUploadUrls", - "documentation":"

When the fileMap argument is provided in the request, the fileUploadUrls will contain a map of file names to upload url.

" + "documentation":"

When the fileMap argument is provided in the request, fileUploadUrls will contain a map of file names to upload URLs.

" }, "zipUploadUrl":{ "shape":"UploadUrl", - "documentation":"

When the fileMap argument is NOT provided. This zipUploadUrl will be returned.

" + "documentation":"

When the fileMap argument is not provided in the request, this zipUploadUrl is returned.

" } }, - "documentation":"

Result structure for create a new deployment.

" + "documentation":"

The result structure for the create deployment request.

" }, "CreateDomainAssociationRequest":{ "type":"structure", @@ -1344,24 +1368,32 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "domainName":{ "shape":"DomainName", - "documentation":"

Domain name for the Domain Association.

" + "documentation":"

The domain name for the domain association.

" }, "enableAutoSubDomain":{ "shape":"EnableAutoSubDomain", - "documentation":"

Enables automated creation of Subdomains for branches. (Currently not supported)

" + "documentation":"

Enables the automated creation of subdomains for branches.

" }, "subDomainSettings":{ "shape":"SubDomainSettings", - "documentation":"

Setting structure for the Subdomain.

" + "documentation":"

The setting for the subdomain.

" + }, + "autoSubDomainCreationPatterns":{ + "shape":"AutoSubDomainCreationPatterns", + "documentation":"

Sets the branch patterns for automatic subdomain creation.

" + }, + "autoSubDomainIAMRole":{ + "shape":"AutoSubDomainIAMRole", + "documentation":"

The required Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) service role for automatically creating subdomains.

" } }, - "documentation":"

Request structure for create Domain Association request.

" + "documentation":"

The request structure for the create domain association request.

" }, "CreateDomainAssociationResult":{ "type":"structure", @@ -1369,10 +1401,10 @@ "members":{ "domainAssociation":{ "shape":"DomainAssociation", - "documentation":"

Domain Association structure.

" + "documentation":"

Describes the structure of a domain association, which associates a custom domain with an Amplify app.

" } }, - "documentation":"

Result structure for the create Domain Association request.

" + "documentation":"

The result structure for the create domain association request.

" }, "CreateTime":{"type":"timestamp"}, "CreateWebhookRequest":{ @@ -1384,20 +1416,20 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch, part of an Amplify App.

" + "documentation":"

The name for a branch that is part of an Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for a webhook.

" + "documentation":"

The description for a webhook.

" } }, - "documentation":"

Request structure for create webhook request.

" + "documentation":"

The request structure for the create webhook request.

" }, "CreateWebhookResult":{ "type":"structure", @@ -1405,10 +1437,10 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Webhook structure.

" + "documentation":"

Describes a webhook that connects repository events to an Amplify app.

" } }, - "documentation":"

Result structure for the create webhook request.

" + "documentation":"

The result structure for the create webhook request.

" }, "CustomDomain":{ "type":"string", @@ -1440,10 +1472,10 @@ }, "condition":{ "shape":"Condition", - "documentation":"

The condition for a URL rewrite or redirect rule, e.g. country code.

" + "documentation":"

The condition for a URL rewrite or redirect rule, such as a country code.

" } }, - "documentation":"

Custom rewrite / redirect rule.

" + "documentation":"

Describes a custom rewrite or redirect rule.

" }, "CustomRules":{ "type":"list", @@ -1464,12 +1496,12 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" } }, - "documentation":"

Request structure for an Amplify App delete request.

" + "documentation":"

Describes the request structure for the delete app request.

" }, "DeleteAppResult":{ "type":"structure", @@ -1477,7 +1509,7 @@ "members":{ "app":{"shape":"App"} }, - "documentation":"

Result structure for an Amplify App delete request.

" + "documentation":"

The result structure for the delete app request.

" }, "DeleteBackendEnvironmentRequest":{ "type":"structure", @@ -1488,18 +1520,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id of an Amplify App.

", + "documentation":"

The unique ID of an Amplify app.

", "location":"uri", "locationName":"appId" }, "environmentName":{ "shape":"EnvironmentName", - "documentation":"

Name of a backend environment of an Amplify App.

", + "documentation":"

The name of a backend environment of an Amplify app.

", "location":"uri", "locationName":"environmentName" } }, - "documentation":"

Request structure for delete backend environment request.

" + "documentation":"

The request structure for the delete backend environment request.

" }, "DeleteBackendEnvironmentResult":{ "type":"structure", @@ -1507,10 +1539,10 @@ "members":{ "backendEnvironment":{ "shape":"BackendEnvironment", - "documentation":"

Backend environment structure for an Amplify App.

" + "documentation":"

Describes the backend environment for an Amplify app.

" } }, - "documentation":"

Result structure of a delete backend environment result.

" + "documentation":"

The result structure of the delete backend environment result.

" }, "DeleteBranchRequest":{ "type":"structure", @@ -1521,18 +1553,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch.

", + "documentation":"

The name for the branch.

", "location":"uri", "locationName":"branchName" } }, - "documentation":"

Request structure for delete branch request.

" + "documentation":"

The request structure for the delete branch request.

" }, "DeleteBranchResult":{ "type":"structure", @@ -1540,10 +1572,10 @@ "members":{ "branch":{ "shape":"Branch", - "documentation":"

Branch structure for an Amplify App.

" + "documentation":"

The branch for an Amplify app, which maps to a third-party repository branch.

" } }, - "documentation":"

Result structure for delete branch request.

" + "documentation":"

The result structure for the delete branch request.

" }, "DeleteDomainAssociationRequest":{ "type":"structure", @@ -1554,18 +1586,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "domainName":{ "shape":"DomainName", - "documentation":"

Name of the domain.

", + "documentation":"

The name of the domain.

", "location":"uri", "locationName":"domainName" } }, - "documentation":"

Request structure for the delete Domain Association request.

" + "documentation":"

The request structure for the delete domain association request.

" }, "DeleteDomainAssociationResult":{ "type":"structure", @@ -1584,24 +1616,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The name for the branch, for the job.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for the Job.

", + "documentation":"

The unique ID for the job.

", "location":"uri", "locationName":"jobId" } }, - "documentation":"

Request structure for delete job request.

" + "documentation":"

The request structure for the delete job request.

" }, "DeleteJobResult":{ "type":"structure", @@ -1609,7 +1641,7 @@ "members":{ "jobSummary":{"shape":"JobSummary"} }, - "documentation":"

Result structure for the delete job request.

" + "documentation":"

The result structure for the delete job request.

" }, "DeleteWebhookRequest":{ "type":"structure", @@ -1617,12 +1649,12 @@ "members":{ "webhookId":{ "shape":"WebhookId", - "documentation":"

Unique Id for a webhook.

", + "documentation":"

The unique ID for a webhook.

", "location":"uri", "locationName":"webhookId" } }, - "documentation":"

Request structure for the delete webhook request.

" + "documentation":"

The request structure for the delete webhook request.

" }, "DeleteWebhookResult":{ "type":"structure", @@ -1630,17 +1662,17 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Webhook structure.

" + "documentation":"

Describes a webhook that connects repository events to an Amplify app.

" } }, - "documentation":"

Result structure for the delete webhook request.

" + "documentation":"

The result structure for the delete webhook request.

" }, "DependentServiceFailureException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when an operation fails due to a dependent service throwing an exception.

", + "documentation":"

An operation failed because a dependent service threw an exception.

", "error":{"httpStatusCode":503}, "exception":true }, @@ -1670,34 +1702,42 @@ "members":{ "domainAssociationArn":{ "shape":"DomainAssociationArn", - "documentation":"

ARN for the Domain Association.

" + "documentation":"

The Amazon Resource Name (ARN) for the domain association.

" }, "domainName":{ "shape":"DomainName", - "documentation":"

Name of the domain.

" + "documentation":"

The name of the domain.

" }, "enableAutoSubDomain":{ "shape":"EnableAutoSubDomain", - "documentation":"

Enables automated creation of Subdomains for branches. (Currently not supported)

" + "documentation":"

Enables the automated creation of subdomains for branches.

" + }, + "autoSubDomainCreationPatterns":{ + "shape":"AutoSubDomainCreationPatterns", + "documentation":"

Sets branch patterns for automatic subdomain creation.

" + }, + "autoSubDomainIAMRole":{ + "shape":"AutoSubDomainIAMRole", + "documentation":"

The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

" }, "domainStatus":{ "shape":"DomainStatus", - "documentation":"

Status fo the Domain Association.

" + "documentation":"

The current status of the domain association.

" }, "statusReason":{ "shape":"StatusReason", - "documentation":"

Reason for the current status of the Domain Association.

" + "documentation":"

The reason for the current status of the domain association.

" }, "certificateVerificationDNSRecord":{ "shape":"CertificateVerificationDNSRecord", - "documentation":"

DNS Record for certificate verification.

" + "documentation":"

The DNS record for certificate verification.

" }, "subDomains":{ "shape":"SubDomains", - "documentation":"

Subdomains for the Domain Association.

" + "documentation":"

The subdomains for the domain association.

" } }, - "documentation":"

Structure for Domain Association, which associates a custom domain with an Amplify App.

" + "documentation":"

Describes a domain association that associates a custom domain with an Amplify app.

" }, "DomainAssociationArn":{ "type":"string", @@ -1734,6 +1774,7 @@ "EnableAutoSubDomain":{"type":"boolean"}, "EnableBasicAuth":{"type":"boolean"}, "EnableBranchAutoBuild":{"type":"boolean"}, + "EnableBranchAutoDeletion":{"type":"boolean"}, "EnableNotification":{"type":"boolean"}, "EnablePullRequestPreview":{"type":"boolean"}, "EndTime":{"type":"timestamp"}, @@ -1786,34 +1827,34 @@ "members":{ "startTime":{ "shape":"StartTime", - "documentation":"

The time at which the logs should start, inclusive.

" + "documentation":"

The time at which the logs should start. The time range specified is inclusive of the start time.

" }, "endTime":{ "shape":"EndTime", - "documentation":"

The time at which the logs should end, inclusive.

" + "documentation":"

The time at which the logs should end. The time range specified is inclusive of the end time.

" }, "domainName":{ "shape":"DomainName", - "documentation":"

Name of the domain.

" + "documentation":"

The name of the domain.

" }, "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" } }, - "documentation":"

Request structure for the generate access logs request.

" + "documentation":"

The request structure for the generate access logs request.

" }, "GenerateAccessLogsResult":{ "type":"structure", "members":{ "logUrl":{ "shape":"LogUrl", - "documentation":"

Pre-signed URL for the requested access logs.

" + "documentation":"

The pre-signed URL for the requested access logs.

" } }, - "documentation":"

Result structure for the generate access logs request.

" + "documentation":"

The result structure for the generate access logs request.

" }, "GetAppRequest":{ "type":"structure", @@ -1821,12 +1862,12 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" } }, - "documentation":"

Request structure for get App request.

" + "documentation":"

The request structure for the get app request.

" }, "GetAppResult":{ "type":"structure", @@ -1841,12 +1882,12 @@ "members":{ "artifactId":{ "shape":"ArtifactId", - "documentation":"

Unique Id for a artifact.

", + "documentation":"

The unique ID for an artifact.

", "location":"uri", "locationName":"artifactId" } }, - "documentation":"

Request structure for the get artifact request.

" + "documentation":"

Returns the request structure for the get artifact request.

" }, "GetArtifactUrlResult":{ "type":"structure", @@ -1857,14 +1898,14 @@ "members":{ "artifactId":{ "shape":"ArtifactId", - "documentation":"

Unique Id for a artifact.

" + "documentation":"

The unique ID for an artifact.

" }, "artifactUrl":{ "shape":"ArtifactUrl", - "documentation":"

Presigned url for the artifact.

" + "documentation":"

The presigned URL for the artifact.

" } }, - "documentation":"

Result structure for the get artifact request.

" + "documentation":"

Returns the result structure for the get artifact request.

" }, "GetBackendEnvironmentRequest":{ "type":"structure", @@ -1875,18 +1916,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "environmentName":{ "shape":"EnvironmentName", - "documentation":"

Name for the backend environment.

", + "documentation":"

The name for the backend environment.

", "location":"uri", "locationName":"environmentName" } }, - "documentation":"

Request structure for get backend environment request.

" + "documentation":"

The request structure for the get backend environment request.

" }, "GetBackendEnvironmentResult":{ "type":"structure", @@ -1894,10 +1935,10 @@ "members":{ "backendEnvironment":{ "shape":"BackendEnvironment", - "documentation":"

Backend environment structure for an an Amplify App.

" + "documentation":"

Describes the backend environment for an Amplify app.

" } }, - "documentation":"

Result structure for get backend environment result.

" + "documentation":"

The result structure for the get backend environment result.

" }, "GetBranchRequest":{ "type":"structure", @@ -1908,18 +1949,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch.

", + "documentation":"

The name for the branch.

", "location":"uri", "locationName":"branchName" } }, - "documentation":"

Request structure for get branch request.

" + "documentation":"

The request structure for the get branch request.

" }, "GetBranchResult":{ "type":"structure", @@ -1937,18 +1978,18 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "domainName":{ "shape":"DomainName", - "documentation":"

Name of the domain.

", + "documentation":"

The name of the domain.

", "location":"uri", "locationName":"domainName" } }, - "documentation":"

Request structure for the get Domain Association request.

" + "documentation":"

The request structure for the get domain association request.

" }, "GetDomainAssociationResult":{ "type":"structure", @@ -1956,10 +1997,10 @@ "members":{ "domainAssociation":{ "shape":"DomainAssociation", - "documentation":"

Domain Association structure.

" + "documentation":"

Describes the structure of a domain association, which associates a custom domain with an Amplify app.

" } }, - "documentation":"

Result structure for the get Domain Association request.

" + "documentation":"

The result structure for the get domain association request.

" }, "GetJobRequest":{ "type":"structure", @@ -1971,24 +2012,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The branch name for the job.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for the Job.

", + "documentation":"

The unique ID for the job.

", "location":"uri", "locationName":"jobId" } }, - "documentation":"

Request structure for get job request.

" + "documentation":"

The request structure for the get job request.

" }, "GetJobResult":{ "type":"structure", @@ -2003,12 +2044,12 @@ "members":{ "webhookId":{ "shape":"WebhookId", - "documentation":"

Unique Id for a webhook.

", + "documentation":"

The unique ID for a webhook.

", "location":"uri", "locationName":"webhookId" } }, - "documentation":"

Request structure for the get webhook request.

" + "documentation":"

The request structure for the get webhook request.

" }, "GetWebhookResult":{ "type":"structure", @@ -2016,17 +2057,17 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Webhook structure.

" + "documentation":"

Describes the structure of a webhook.

" } }, - "documentation":"

Result structure for the get webhook request.

" + "documentation":"

The result structure for the get webhook request.

" }, "InternalFailureException":{ "type":"structure", "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when the service fails to perform an operation due to an internal issue.

", + "documentation":"

The service failed to perform an operation due to an internal issue.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -2040,14 +2081,14 @@ "members":{ "summary":{ "shape":"JobSummary", - "documentation":"

Summary for an execution job for an Amplify App.

" + "documentation":"

Describes the summary for an execution job for an Amplify app.

" }, "steps":{ "shape":"Steps", - "documentation":"

Execution steps for an execution job, for an Amplify App.

" + "documentation":"

The execution steps for an execution job, for an Amplify app.

" } }, - "documentation":"

Structure for an execution job for an Amplify App.

" + "documentation":"

Describes an execution job for an Amplify app.

" }, "JobArn":{ "type":"string", @@ -2092,42 +2133,42 @@ "members":{ "jobArn":{ "shape":"JobArn", - "documentation":"

Arn for the Job.

" + "documentation":"

The Amazon Resource Name (ARN) for the job.

" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for the Job.

" + "documentation":"

The unique ID for the job.

" }, "commitId":{ "shape":"CommitId", - "documentation":"

Commit Id from 3rd party repository provider for the Job.

" + "documentation":"

The commit ID from a third-party repository provider for the job.

" }, "commitMessage":{ "shape":"CommitMessage", - "documentation":"

Commit message from 3rd party repository provider for the Job.

" + "documentation":"

The commit message from a third-party repository provider for the job.

" }, "commitTime":{ "shape":"CommitTime", - "documentation":"

Commit date / time for the Job.

" + "documentation":"

The commit date and time for the job.

" }, "startTime":{ "shape":"StartTime", - "documentation":"

Start date / time for the Job.

" + "documentation":"

The start date and time for the job.

" }, "status":{ "shape":"JobStatus", - "documentation":"

Status for the Job.

" + "documentation":"

The current status for the job.

" }, "endTime":{ "shape":"EndTime", - "documentation":"

End date / time for the Job.

" + "documentation":"

The end date and time for the job.

" }, "jobType":{ "shape":"JobType", - "documentation":"

Type for the Job. \\n \"RELEASE\": Manually released from source by using StartJob API. \"RETRY\": Manually retried by using StartJob API. \"WEB_HOOK\": Automatically triggered by WebHooks.

" + "documentation":"

The type for the job. If the value is RELEASE, the job was manually released from its source by using the StartJob API. If the value is RETRY, the job was manually retried using the StartJob API. If the value is WEB_HOOK, the job was automatically triggered by webhooks.

" } }, - "documentation":"

Structure for the summary of a Job.

" + "documentation":"

Describes the summary for an execution job for an Amplify app.

" }, "JobType":{ "type":"string", @@ -2145,7 +2186,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when a resource could not be created because of service limits.

", + "documentation":"

A resource could not be created because service quotas were exceeded.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -2154,18 +2195,18 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

", + "documentation":"

A pagination token. If non-null, the pagination token is returned in a result. Pass its value in another request to retrieve more entries.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for an Amplify App list request.

" + "documentation":"

The request structure for the list apps request.

" }, "ListAppsResult":{ "type":"structure", @@ -2173,14 +2214,14 @@ "members":{ "apps":{ "shape":"Apps", - "documentation":"

List of Amplify Apps.

" + "documentation":"

A list of Amplify apps.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing Apps from start. If non-null pagination token is returned in a result, then pass its value in here to list more projects.

" + "documentation":"

A pagination token. Set to null to start listing apps from the start. If a non-null pagination token is returned in a result, pass its value in here to list more projects.

" } }, - "documentation":"

Result structure for an Amplify App list request.

" + "documentation":"

The result structure for an Amplify app list request.

" }, "ListArtifactsRequest":{ "type":"structure", @@ -2192,36 +2233,36 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch, part of an Amplify App.

", + "documentation":"

The name of a branch that is part of an Amplify app.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for an Job.

", + "documentation":"

The unique ID for a job.

", "location":"uri", "locationName":"jobId" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing artifacts from start. If non-null pagination token is returned in a result, then pass its value in here to list more artifacts.

", + "documentation":"

A pagination token. Set to null to start listing artifacts from start. If a non-null pagination token is returned in a result, pass its value in here to list more artifacts.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for the list artifacts request.

" + "documentation":"

Describes the request structure for the list artifacts request.

" }, "ListArtifactsResult":{ "type":"structure", @@ -2229,14 +2270,14 @@ "members":{ "artifacts":{ "shape":"Artifacts", - "documentation":"

List of artifacts.

" + "documentation":"

A list of artifacts.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Result structure for the list artifacts request.

" + "documentation":"

The result structure for the list artifacts request.

" }, "ListBackendEnvironmentsRequest":{ "type":"structure", @@ -2244,28 +2285,30 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "environmentName":{ "shape":"EnvironmentName", - "documentation":"

Name of the backend environment

" + "documentation":"

The name of the backend environment.

", + "location":"querystring", + "locationName":"environmentName" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing backen environments from start. If a non-null pagination token is returned in a result, then pass its value in here to list more backend environments.

", + "documentation":"

A pagination token. Set to null to start listing backend environments from the start. If a non-null pagination token is returned in a result, pass its value in here to list more backend environments.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for list backend environments request.

" + "documentation":"

The request structure for the list backend environments request.

" }, "ListBackendEnvironmentsResult":{ "type":"structure", @@ -2273,14 +2316,14 @@ "members":{ "backendEnvironments":{ "shape":"BackendEnvironments", - "documentation":"

List of backend environments for an Amplify App.

" + "documentation":"

The list of backend environments for an Amplify app.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Result structure for list backend environments result.

" + "documentation":"

The result structure for the list backend environments result.

" }, "ListBranchesRequest":{ "type":"structure", @@ -2288,24 +2331,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing branches from start. If a non-null pagination token is returned in a result, then pass its value in here to list more branches.

", + "documentation":"

A pagination token. Set to null to start listing branches from the start. If a non-null pagination token is returned in a result, pass its value in here to list more branches.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for list branches request.

" + "documentation":"

The request structure for the list branches request.

" }, "ListBranchesResult":{ "type":"structure", @@ -2313,14 +2356,14 @@ "members":{ "branches":{ "shape":"Branches", - "documentation":"

List of branches for an Amplify App.

" + "documentation":"

A list of branches for an Amplify app.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Result structure for list branches request.

" + "documentation":"

The result structure for the list branches request.

" }, "ListDomainAssociationsRequest":{ "type":"structure", @@ -2328,24 +2371,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing Apps from start. If non-null pagination token is returned in a result, then pass its value in here to list more projects.

", + "documentation":"

A pagination token. Set to null to start listing apps from the start. If non-null, a pagination token is returned in a result. Pass its value in here to list more projects.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for the list Domain Associations request.

" + "documentation":"

The request structure for the list domain associations request.

" }, "ListDomainAssociationsResult":{ "type":"structure", @@ -2353,14 +2396,14 @@ "members":{ "domainAssociations":{ "shape":"DomainAssociations", - "documentation":"

List of Domain Associations.

" + "documentation":"

A list of domain associations.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If non-null, a pagination token is returned in a result. Pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Result structure for the list Domain Association request.

" + "documentation":"

The result structure for the list domain association request.

" }, "ListJobsRequest":{ "type":"structure", @@ -2371,30 +2414,30 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch.

", + "documentation":"

The name for a branch.

", "location":"uri", "locationName":"branchName" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing steps from start. If a non-null pagination token is returned in a result, then pass its value in here to list more steps.

", + "documentation":"

A pagination token. Set to null to start listing steps from the start. If a non-null pagination token is returned in a result, pass its value in here to list more steps.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for list job request.

" + "documentation":"

The request structure for the list jobs request.

" }, "ListJobsResult":{ "type":"structure", @@ -2402,14 +2445,14 @@ "members":{ "jobSummaries":{ "shape":"JobSummaries", - "documentation":"

Result structure for list job result request.

" + "documentation":"

The result structure for the list job result request.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If non-null, the pagination token is returned in a result. Pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Maximum number of records to list in a single response.

" + "documentation":"

The result structure for the list jobs request.

" }, "ListTagsForResourceRequest":{ "type":"structure", @@ -2417,22 +2460,22 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

Resource arn used to list tags.

", + "documentation":"

The Amazon Resource Name (ARN) to use to list tags.

", "location":"uri", "locationName":"resourceArn" } }, - "documentation":"

Request structure used to list tags for resource.

" + "documentation":"

The request structure to use to list tags for a resource.

" }, "ListTagsForResourceResponse":{ "type":"structure", "members":{ "tags":{ "shape":"TagMap", - "documentation":"

Tags result for response.

" + "documentation":"

A list of tags for the specified Amazon Resource Name (ARN).

" } }, - "documentation":"

Response for list tags.

" + "documentation":"

The response for the list tags for resource request.

" }, "ListWebhooksRequest":{ "type":"structure", @@ -2440,24 +2483,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. Set to null to start listing webhooks from start. If non-null pagination token is returned in a result, then pass its value in here to list more webhooks.

", + "documentation":"

A pagination token. Set to null to start listing webhooks from the start. If non-null, the pagination token is returned in a result. Pass its value in here to list more webhooks.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

Maximum number of records to list in a single response.

", + "documentation":"

The maximum number of records to list in a single response.

", "location":"querystring", "locationName":"maxResults" } }, - "documentation":"

Request structure for the list webhooks request.

" + "documentation":"

The request structure for the list webhooks request.

" }, "ListWebhooksResult":{ "type":"structure", @@ -2465,14 +2508,14 @@ "members":{ "webhooks":{ "shape":"Webhooks", - "documentation":"

List of webhooks.

" + "documentation":"

A list of webhooks.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries.

" + "documentation":"

A pagination token. If non-null, the pagination token is returned in a result. Pass its value in another request to retrieve more entries.

" } }, - "documentation":"

Result structure for the list webhooks request.

" + "documentation":"

The result structure for the list webhooks request.

" }, "LogUrl":{ "type":"string", @@ -2501,13 +2544,14 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when an entity has not been found during an operation.

", + "documentation":"

An entity was not found during an operation.

", "error":{"httpStatusCode":404}, "exception":true }, "OauthToken":{ "type":"string", - "max":100 + "max":100, + "sensitive":true }, "Platform":{ "type":"string", @@ -2518,22 +2562,22 @@ "members":{ "lastDeployTime":{ "shape":"LastDeployTime", - "documentation":"

Last Deploy Time of Production Branch.

" + "documentation":"

The last deploy time of the production branch.

" }, "status":{ "shape":"Status", - "documentation":"

Status of Production Branch.

" + "documentation":"

The status of the production branch.

" }, "thumbnailUrl":{ "shape":"ThumbnailUrl", - "documentation":"

Thumbnail URL for Production Branch.

" + "documentation":"

The thumbnail URL for the production branch.

" }, "branchName":{ "shape":"BranchName", - "documentation":"

Branch Name for Production Branch.

" + "documentation":"

The branch name for the production branch.

" } }, - "documentation":"

Structure with Production Branch information.

" + "documentation":"

Describes the information about a production branch for an Amplify app.

" }, "PullRequestEnvironmentName":{ "type":"string", @@ -2557,7 +2601,7 @@ "code":{"shape":"Code"}, "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when an operation fails due to non-existent resource.

", + "documentation":"

An operation failed due to a non-existent resource.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -2604,26 +2648,26 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The name for the branch, for the job.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

The job id for this deployment, generated by create deployment request.

" + "documentation":"

The job ID for this deployment, generated by the create deployment request.

" }, "sourceUrl":{ "shape":"SourceUrl", - "documentation":"

The sourceUrl for this deployment, used when calling start deployment without create deployment. SourceUrl can be any HTTP GET url that is public accessible and downloads a single zip.

" + "documentation":"

The source URL for this deployment, used when calling start deployment without create deployment. The source URL can be any HTTP GET URL that is publicly accessible and downloads a single .zip file.

" } }, - "documentation":"

Request structure for start a deployment.

" + "documentation":"

The request structure for the start a deployment request.

" }, "StartDeploymentResult":{ "type":"structure", @@ -2631,10 +2675,10 @@ "members":{ "jobSummary":{ "shape":"JobSummary", - "documentation":"

Summary for the Job.

" + "documentation":"

The summary for the job.

" } }, - "documentation":"

Result structure for start a deployment.

" + "documentation":"

The result structure for the start a deployment request.

" }, "StartJobRequest":{ "type":"structure", @@ -2646,42 +2690,42 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The branch name for the job.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for an existing job. Required for \"RETRY\" JobType.

" + "documentation":"

The unique ID for an existing job. This is required if the value of jobType is RETRY.

" }, "jobType":{ "shape":"JobType", - "documentation":"

Type for the Job. Available JobTypes are: \\n \"RELEASE\": Start a new job with the latest change from the specified branch. Only available for apps that have connected to a repository. \"RETRY\": Retry an existing job. JobId is required for this type of job.

" + "documentation":"

Describes the type for the job. The job type RELEASE starts a new job with the latest change from the specified branch. This value is available only for apps that are connected to a repository. The job type RETRY retries an existing job. If the job type value is RETRY, the jobId is also required.

" }, "jobReason":{ "shape":"JobReason", - "documentation":"

Descriptive reason for starting this job.

" + "documentation":"

A descriptive reason for starting this job.

" }, "commitId":{ "shape":"CommitId", - "documentation":"

Commit Id from 3rd party repository provider for the Job.

" + "documentation":"

The commit ID from a third-party repository provider for the job.

" }, "commitMessage":{ "shape":"CommitMessage", - "documentation":"

Commit message from 3rd party repository provider for the Job.

" + "documentation":"

The commit message from a third-party repository provider for the job.

" }, "commitTime":{ "shape":"CommitTime", - "documentation":"

Commit date / time for the Job.

" + "documentation":"

The commit date and time for the job.

" } }, - "documentation":"

Request structure for Start job request.

" + "documentation":"

The request structure for the start job request.

" }, "StartJobResult":{ "type":"structure", @@ -2689,10 +2733,10 @@ "members":{ "jobSummary":{ "shape":"JobSummary", - "documentation":"

Summary for the Job.

" + "documentation":"

The summary for the job.

" } }, - "documentation":"

Result structure for run job request.

" + "documentation":"

The result structure for the run job request.

" }, "StartTime":{"type":"timestamp"}, "Status":{ @@ -2715,50 +2759,50 @@ "members":{ "stepName":{ "shape":"StepName", - "documentation":"

Name of the execution step.

" + "documentation":"

The name of the execution step.

" }, "startTime":{ "shape":"StartTime", - "documentation":"

Start date/ time of the execution step.

" + "documentation":"

The start date and time of the execution step.

" }, "status":{ "shape":"JobStatus", - "documentation":"

Status of the execution step.

" + "documentation":"

The status of the execution step.

" }, "endTime":{ "shape":"EndTime", - "documentation":"

End date/ time of the execution step.

" + "documentation":"

The end date and time of the execution step.

" }, "logUrl":{ "shape":"LogUrl", - "documentation":"

URL to the logs for the execution step.

" + "documentation":"

The URL to the logs for the execution step.

" }, "artifactsUrl":{ "shape":"ArtifactsUrl", - "documentation":"

URL to the artifact for the execution step.

" + "documentation":"

The URL to the artifact for the execution step.

" }, "testArtifactsUrl":{ "shape":"TestArtifactsUrl", - "documentation":"

URL to the test artifact for the execution step.

" + "documentation":"

The URL to the test artifact for the execution step.

" }, "testConfigUrl":{ "shape":"TestConfigUrl", - "documentation":"

URL to the test config for the execution step.

" + "documentation":"

The URL to the test configuration for the execution step.

" }, "screenshots":{ "shape":"Screenshots", - "documentation":"

List of screenshot URLs for the execution step, if relevant.

" + "documentation":"

The list of screenshot URLs for the execution step, if relevant.

" }, "statusReason":{ "shape":"StatusReason", - "documentation":"

The reason for current step status.

" + "documentation":"

The reason for the current step status.

" }, "context":{ "shape":"Context", - "documentation":"

The context for current step, will include build image if step is build.

" + "documentation":"

The context for the current step. Includes a build image if the step is build.

" } }, - "documentation":"

Structure for an execution step for an execution job, for an Amplify App.

" + "documentation":"

Describes an execution step, for an execution job, for an Amplify app.

" }, "StepName":{ "type":"string", @@ -2778,24 +2822,24 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch, for the Job.

", + "documentation":"

The name for the branch, for the job.

", "location":"uri", "locationName":"branchName" }, "jobId":{ "shape":"JobId", - "documentation":"

Unique Id for the Job.

", + "documentation":"

The unique ID for the job.

", "location":"uri", "locationName":"jobId" } }, - "documentation":"

Request structure for stop job request.

" + "documentation":"

The request structure for the stop job request.

" }, "StopJobResult":{ "type":"structure", @@ -2803,10 +2847,10 @@ "members":{ "jobSummary":{ "shape":"JobSummary", - "documentation":"

Summary for the Job.

" + "documentation":"

The summary for the job.

" } }, - "documentation":"

Result structure for the stop job request.

" + "documentation":"

The result structure for the stop job request.

" }, "SubDomain":{ "type":"structure", @@ -2818,18 +2862,18 @@ "members":{ "subDomainSetting":{ "shape":"SubDomainSetting", - "documentation":"

Setting structure for the Subdomain.

" + "documentation":"

Describes the settings for the subdomain.

" }, "verified":{ "shape":"Verified", - "documentation":"

Verified status of the Subdomain

" + "documentation":"

The verified status of the subdomain.

" }, "dnsRecord":{ "shape":"DNSRecord", - "documentation":"

DNS record for the Subdomain.

" + "documentation":"

The DNS record for the subdomain.

" } }, - "documentation":"

Subdomain for the Domain Association.

" + "documentation":"

The subdomain for the domain association.

" }, "SubDomainSetting":{ "type":"structure", @@ -2840,14 +2884,14 @@ "members":{ "prefix":{ "shape":"DomainPrefix", - "documentation":"

Prefix setting for the Subdomain.

" + "documentation":"

The prefix setting for the subdomain.

" }, "branchName":{ "shape":"BranchName", - "documentation":"

Branch name setting for the Subdomain.

" + "documentation":"

The branch name setting for the subdomain.

" } }, - "documentation":"

Setting for the Subdomain.

" + "documentation":"

Describes the settings for the subdomain.

" }, "SubDomainSettings":{ "type":"list", @@ -2861,7 +2905,7 @@ }, "TTL":{ "type":"string", - "documentation":"

The content TTL for the website in seconds.

" + "documentation":"

The content Time to Live (TTL) for the website in seconds.

" }, "TagKey":{ "type":"string", @@ -2891,22 +2935,22 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

Resource arn used to tag resource.

", + "documentation":"

The Amazon Resource Name (ARN) to use to tag a resource.

", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagMap", - "documentation":"

Tags used to tag resource.

" + "documentation":"

The tags used to tag the resource.

" } }, - "documentation":"

Request structure used to tag resource.

" + "documentation":"

The request structure to tag a resource with a tag key and value.

" }, "TagResourceResponse":{ "type":"structure", "members":{ }, - "documentation":"

Response for tag resource.

" + "documentation":"

The response for the tag resource request.

" }, "TagValue":{ "type":"string", @@ -2943,7 +2987,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

Exception thrown when an operation fails due to a lack of access.

", + "documentation":"

An operation failed due to a lack of access.

", "error":{"httpStatusCode":401}, "exception":true }, @@ -2956,24 +3000,24 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

Resource arn used to untag resource.

", + "documentation":"

The Amazon Resource Name (ARN) to use to untag a resource.

", "location":"uri", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeyList", - "documentation":"

Tag keys used to untag resource.

", + "documentation":"

The tag keys to use to untag a resource.

", "location":"querystring", "locationName":"tagKeys" } }, - "documentation":"

Request structure used to untag resource.

" + "documentation":"

The request structure for the untag resource request.

" }, "UntagResourceResponse":{ "type":"structure", "members":{ }, - "documentation":"

Response for untag resource.

" + "documentation":"

The response for the untag resource request.

" }, "UpdateAppRequest":{ "type":"structure", @@ -2981,76 +3025,80 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "name":{ "shape":"Name", - "documentation":"

Name for an Amplify App.

" + "documentation":"

The name for an Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for an Amplify App.

" + "documentation":"

The description for an Amplify app.

" }, "platform":{ "shape":"Platform", - "documentation":"

Platform for an Amplify App.

" + "documentation":"

The platform for an Amplify app.

" }, "iamServiceRoleArn":{ "shape":"ServiceRoleArn", - "documentation":"

IAM service role for an Amplify App.

" + "documentation":"

The AWS Identity and Access Management (IAM) service role for an Amplify app.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables for an Amplify App.

" + "documentation":"

The environment variables for an Amplify app.

" }, "enableBranchAutoBuild":{ "shape":"EnableAutoBuild", - "documentation":"

Enables branch auto-building for an Amplify App.

" + "documentation":"

Enables branch auto-building for an Amplify app.

" + }, + "enableBranchAutoDeletion":{ + "shape":"EnableBranchAutoDeletion", + "documentation":"

Automatically disconnects a branch in the Amplify Console when you delete a branch from your Git repository.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Authorization for an Amplify App.

" + "documentation":"

Enables basic authorization for an Amplify app.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for an Amplify App.

" + "documentation":"

The basic authorization credentials for an Amplify app.

" }, "customRules":{ "shape":"CustomRules", - "documentation":"

Custom redirect / rewrite rules for an Amplify App.

" + "documentation":"

The custom redirect and rewrite rules for an Amplify app.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec for an Amplify App.

" + "documentation":"

The build specification (build spec) for an Amplify app.

" }, "enableAutoBranchCreation":{ "shape":"EnableAutoBranchCreation", - "documentation":"

Enables automated branch creation for the Amplify App.

" + "documentation":"

Enables automated branch creation for the Amplify app.

" }, "autoBranchCreationPatterns":{ "shape":"AutoBranchCreationPatterns", - "documentation":"

Automated branch creation glob patterns for the Amplify App.

" + "documentation":"

Describes the automated branch creation glob patterns for the Amplify app.

" }, "autoBranchCreationConfig":{ "shape":"AutoBranchCreationConfig", - "documentation":"

Automated branch creation branchConfig for the Amplify App.

" + "documentation":"

The automated branch creation configuration for the Amplify app.

" }, "repository":{ "shape":"Repository", - "documentation":"

Repository for an Amplify App

" + "documentation":"

The name of the repository for an Amplify app.

" }, "oauthToken":{ "shape":"OauthToken", - "documentation":"

OAuth token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. OAuth token is not stored.

" + "documentation":"

The OAuth token for a third-party source control system for an Amplify app. The token is used to create a webhook and a read-only deploy key. The OAuth token is not stored.

" }, "accessToken":{ "shape":"AccessToken", - "documentation":"

Personal Access token for 3rd party source control system for an Amplify App, used to create webhook and read-only deploy key. Token is not stored.

" + "documentation":"

The personal access token for a third-party source control system for an Amplify app. The token is used to create a webhook and a read-only deploy key. The token is not stored.

" } }, - "documentation":"

Request structure for update App request.

" + "documentation":"

The request structure for the update app request.

" }, "UpdateAppResult":{ "type":"structure", @@ -3058,10 +3106,10 @@ "members":{ "app":{ "shape":"App", - "documentation":"

App structure for the updated App.

" + "documentation":"

Represents the updated Amplify app.

" } }, - "documentation":"

Result structure for an Amplify App update request.

" + "documentation":"

The result structure for an Amplify app update request.

" }, "UpdateBranchRequest":{ "type":"structure", @@ -3072,27 +3120,27 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for the branch.

", + "documentation":"

The name for the branch.

", "location":"uri", "locationName":"branchName" }, "description":{ "shape":"Description", - "documentation":"

Description for the branch.

" + "documentation":"

The description for the branch.

" }, "framework":{ "shape":"Framework", - "documentation":"

Framework for the branch.

" + "documentation":"

The framework for the branch.

" }, "stage":{ "shape":"Stage", - "documentation":"

Stage for the branch.

" + "documentation":"

Describes the current stage for the branch.

" }, "enableNotification":{ "shape":"EnableNotification", @@ -3104,42 +3152,42 @@ }, "environmentVariables":{ "shape":"EnvironmentVariables", - "documentation":"

Environment Variables for the branch.

" + "documentation":"

The environment variables for the branch.

" }, "basicAuthCredentials":{ "shape":"BasicAuthCredentials", - "documentation":"

Basic Authorization credentials for the branch.

" + "documentation":"

The basic authorization credentials for the branch.

" }, "enableBasicAuth":{ "shape":"EnableBasicAuth", - "documentation":"

Enables Basic Auth for the branch.

" + "documentation":"

Enables basic authorization for the branch.

" }, "buildSpec":{ "shape":"BuildSpec", - "documentation":"

BuildSpec for the branch.

" + "documentation":"

The build specification (build spec) for the branch.

" }, "ttl":{ "shape":"TTL", - "documentation":"

The content TTL for the website in seconds.

" + "documentation":"

The content Time to Live (TTL) for the website in seconds.

" }, "displayName":{ "shape":"DisplayName", - "documentation":"

Display name for a branch, will use as the default domain prefix.

" + "documentation":"

The display name for a branch. This is used as the default domain prefix.

" }, "enablePullRequestPreview":{ "shape":"EnablePullRequestPreview", - "documentation":"

Enables Pull Request Preview for this branch.

" + "documentation":"

Enables pull request preview for this branch.

" }, "pullRequestEnvironmentName":{ "shape":"PullRequestEnvironmentName", - "documentation":"

The Amplify Environment name for the pull request.

" + "documentation":"

The Amplify environment name for the pull request.

" }, "backendEnvironmentArn":{ "shape":"BackendEnvironmentArn", - "documentation":"

ARN for a Backend Environment, part of an Amplify App.

" + "documentation":"

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

" } }, - "documentation":"

Request structure for update branch request.

" + "documentation":"

The request structure for the update branch request.

" }, "UpdateBranchResult":{ "type":"structure", @@ -3147,10 +3195,10 @@ "members":{ "branch":{ "shape":"Branch", - "documentation":"

Branch structure for an Amplify App.

" + "documentation":"

The branch for an Amplify app, which maps to a third-party repository branch.

" } }, - "documentation":"

Result structure for update branch request.

" + "documentation":"

The result structure for the update branch request.

" }, "UpdateDomainAssociationRequest":{ "type":"structure", @@ -3162,26 +3210,34 @@ "members":{ "appId":{ "shape":"AppId", - "documentation":"

Unique Id for an Amplify App.

", + "documentation":"

The unique ID for an Amplify app.

", "location":"uri", "locationName":"appId" }, "domainName":{ "shape":"DomainName", - "documentation":"

Name of the domain.

", + "documentation":"

The name of the domain.

", "location":"uri", "locationName":"domainName" }, "enableAutoSubDomain":{ "shape":"EnableAutoSubDomain", - "documentation":"

Enables automated creation of Subdomains for branches. (Currently not supported)

" + "documentation":"

Enables the automated creation of subdomains for branches.

" }, "subDomainSettings":{ "shape":"SubDomainSettings", - "documentation":"

Setting structure for the Subdomain.

" + "documentation":"

Describes the settings for the subdomain.

" + }, + "autoSubDomainCreationPatterns":{ + "shape":"AutoSubDomainCreationPatterns", + "documentation":"

Sets the branch patterns for automatic subdomain creation.

" + }, + "autoSubDomainIAMRole":{ + "shape":"AutoSubDomainIAMRole", + "documentation":"

The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

" } }, - "documentation":"

Request structure for update Domain Association request.

" + "documentation":"

The request structure for the update domain association request.

" }, "UpdateDomainAssociationResult":{ "type":"structure", @@ -3189,10 +3245,10 @@ "members":{ "domainAssociation":{ "shape":"DomainAssociation", - "documentation":"

Domain Association structure.

" + "documentation":"

Describes a domain association, which associates a custom domain with an Amplify app.

" } }, - "documentation":"

Result structure for the update Domain Association request.

" + "documentation":"

The result structure for the update domain association request.

" }, "UpdateTime":{"type":"timestamp"}, "UpdateWebhookRequest":{ @@ -3201,20 +3257,20 @@ "members":{ "webhookId":{ "shape":"WebhookId", - "documentation":"

Unique Id for a webhook.

", + "documentation":"

The unique ID for a webhook.

", "location":"uri", "locationName":"webhookId" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch, part of an Amplify App.

" + "documentation":"

The name for a branch that is part of an Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for a webhook.

" + "documentation":"

The description for a webhook.

" } }, - "documentation":"

Request structure for update webhook request.

" + "documentation":"

The request structure for the update webhook request.

" }, "UpdateWebhookResult":{ "type":"structure", @@ -3222,10 +3278,10 @@ "members":{ "webhook":{ "shape":"Webhook", - "documentation":"

Webhook structure.

" + "documentation":"

Describes a webhook that connects repository events to an Amplify app.

" } }, - "documentation":"

Result structure for the update webhook request.

" + "documentation":"

The result structure for the update webhook request.

" }, "UploadUrl":{ "type":"string", @@ -3246,34 +3302,34 @@ "members":{ "webhookArn":{ "shape":"WebhookArn", - "documentation":"

ARN for the webhook.

" + "documentation":"

The Amazon Resource Name (ARN) for the webhook.

" }, "webhookId":{ "shape":"WebhookId", - "documentation":"

Id of the webhook.

" + "documentation":"

The ID of the webhook.

" }, "webhookUrl":{ "shape":"WebhookUrl", - "documentation":"

Url of the webhook.

" + "documentation":"

The URL of the webhook.

" }, "branchName":{ "shape":"BranchName", - "documentation":"

Name for a branch, part of an Amplify App.

" + "documentation":"

The name for a branch that is part of an Amplify app.

" }, "description":{ "shape":"Description", - "documentation":"

Description for a webhook.

" + "documentation":"

The description for a webhook.

" }, "createTime":{ "shape":"CreateTime", - "documentation":"

Create date / time for a webhook.

" + "documentation":"

The create date and time for a webhook.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Update date / time for a webhook.

" + "documentation":"

The update date and time for a webhook.

" } }, - "documentation":"

Structure for webhook, which associates a webhook with an Amplify App.

" + "documentation":"

Describes a webhook that connects repository events to an Amplify app.

" }, "WebhookArn":{ "type":"string", @@ -3292,5 +3348,5 @@ "member":{"shape":"Webhook"} } }, - "documentation":"

Amplify is a fully managed continuous deployment and hosting service for modern web apps.

" + "documentation":"

Amplify enables developers to develop and deploy cloud-powered mobile and web apps. The Amplify Console provides a continuous delivery and hosting service for web applications. For more information, see the Amplify Console User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation for client app development. For more information, see the Amplify Framework.

" } diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index c54482615b21..c2db488dd576 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 0e92e9026367..f5724ee85726 100755 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -1995,7 +1995,7 @@ }, "destinationArn":{ "shape":"String", - "documentation":"

The ARN of the CloudWatch Logs log group to receive access logs.

" + "documentation":"

The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis Data Firehose delivery stream to receive access logs. If you specify a Kinesis Data Firehose delivery stream, the stream name must begin with amazon-apigateway-.

" } }, "documentation":"

Access log settings, including the access log format and access log destination ARN.

" @@ -2364,7 +2364,7 @@ }, "generateDistinctId":{ "shape":"Boolean", - "documentation":"

Specifies whether (true) or not (false) the key identifier is distinct from the created API key value.

" + "documentation":"

Specifies whether (true) or not (false) the key identifier is distinct from the created API key value. This parameter is deprecated and should not be used.

" }, "value":{ "shape":"String", @@ -2461,7 +2461,7 @@ }, "stage":{ "shape":"String", - "documentation":"

The name of the API's stage that you want to use for this mapping. Specify '(none)' if you do not want callers to explicitly specify the stage name after any base path name.

" + "documentation":"

The name of the API's stage that you want to use for this mapping. Specify '(none)' if you want callers to explicitly specify the stage name after any base path name.

" } }, "documentation":"

Requests API Gateway to create a new BasePathMapping resource.

" @@ -2877,7 +2877,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

[Required] The ARNs of network load balancers of the VPC targeted by the VPC link. The network load balancers must be owned by the same AWS account of the API owner.

" + "documentation":"

[Required] The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

" }, "tags":{ "shape":"MapOfStringToString", @@ -4693,7 +4693,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

", + "documentation":"

[Required] The ARN of a resource that can be tagged.

", "location":"uri", "locationName":"resource_arn" }, @@ -4957,7 +4957,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The POST request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The POST request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB.

" } }, "documentation":"

A POST request to import an API to API Gateway using an input of an API definition file.

", @@ -5013,7 +5013,7 @@ }, "cacheNamespace":{ "shape":"String", - "documentation":"

An API-specific tag group of related cached parameters. To be valid values for cacheKeyParameters, these parameters must also be specified for Method requestParameters.

" + "documentation":"

Specifies a group of related cached parameters. By default, API Gateway uses the resource ID as the cacheNamespace. You can specify the same cacheNamespace across resources to return the same cached data for requests to different resources.

" }, "cacheKeyParameters":{ "shape":"ListOfString", @@ -5022,6 +5022,10 @@ "integrationResponses":{ "shape":"MapOfIntegrationResponse", "documentation":"

Specifies the integration's responses.

Example: Get integration responses of a method

Request

GET /restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200 HTTP/1.1 Content-Type: application/json Host: apigateway.us-east-1.amazonaws.com X-Amz-Date: 20160607T191449Z Authorization: AWS4-HMAC-SHA256 Credential={access_key_ID}/20160607/us-east-1/apigateway/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature={sig4_hash} 
Response

The successful response returns 200 OK status and a payload as follows:

{ \"_links\": { \"curies\": { \"href\": \"https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-integration-response-{rel}.html\", \"name\": \"integrationresponse\", \"templated\": true }, \"self\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\", \"title\": \"200\" }, \"integrationresponse:delete\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" }, \"integrationresponse:update\": { \"href\": \"/restapis/fugvjdxtri/resources/3kzxbg5sa2/methods/GET/integration/responses/200\" } }, \"responseParameters\": { \"method.response.header.Content-Type\": \"'application/xml'\" }, \"responseTemplates\": { \"application/json\": \"$util.urlDecode(\\\"%3CkinesisStreams%3E#foreach($stream in $input.path('$.StreamNames'))%3Cstream%3E%3Cname%3E$stream%3C/name%3E%3C/stream%3E#end%3C/kinesisStreams%3E\\\")\\n\" }, \"statusCode\": \"200\" }

" + }, + "tlsConfig":{ + "shape":"TlsConfig", + "documentation":"

Specifies the TLS configuration for an integration.

" } }, "documentation":"

Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

In the API Gateway console, the built-in Lambda integration is an AWS integration.
" @@ -5321,7 +5325,7 @@ }, "loggingLevel":{ "shape":"String", - "documentation":"

Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO.

" + "documentation":"

Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO. Choose ERROR to write only error-level entries to CloudWatch Logs, or choose INFO to include all ERROR events as well as extra informational events.

" }, "dataTraceEnabled":{ "shape":"Boolean", @@ -5560,11 +5564,11 @@ }, "cacheNamespace":{ "shape":"String", - "documentation":"

A list of request parameters whose values are to be cached.

" + "documentation":"

Specifies a group of related cached parameters. By default, API Gateway uses the resource ID as the cacheNamespace. You can specify the same cacheNamespace across resources to return the same cached data for requests to different resources.

" }, "cacheKeyParameters":{ "shape":"ListOfString", - "documentation":"

An API-specific tag group of related cached parameters.

" + "documentation":"

A list of request parameters whose values API Gateway caches. To be valid values for cacheKeyParameters, these parameters must also be specified for Method requestParameters.

" }, "contentHandling":{ "shape":"ContentHandlingStrategy", @@ -5573,7 +5577,8 @@ "timeoutInMillis":{ "shape":"NullableInteger", "documentation":"

Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds.

" - } + }, + "tlsConfig":{"shape":"TlsConfig"} }, "documentation":"

Sets up a method's integration.

" }, @@ -5774,7 +5779,7 @@ }, "body":{ "shape":"Blob", - "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 2MB.

" + "documentation":"

[Required] The PUT request body containing external API definitions. Currently, only OpenAPI definition JSON/YAML files are supported. The maximum size of the API definition file is 6MB.

" } }, "documentation":"

A PUT request to update an existing API, with external API definitions specified as the request body.

", @@ -6162,7 +6167,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

", + "documentation":"

[Required] The ARN of a resource that can be tagged.

", "location":"uri", "locationName":"resource_arn" }, @@ -6188,7 +6193,7 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The Apache Velocity Template Language (VTL) template content used for the template resource.

" + "documentation":"

The Apache Velocity Template Language (VTL) template content used for the template resource.

" } }, "documentation":"

Represents a mapping template used to transform a payload.

" @@ -6368,6 +6373,15 @@ "documentation":"

The API request rate limits.

" }, "Timestamp":{"type":"timestamp"}, + "TlsConfig":{ + "type":"structure", + "members":{ + "insecureSkipVerification":{ + "shape":"Boolean", + "documentation":"

Specifies whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations.

" + } + } + }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -6408,7 +6422,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

[Required] The ARN of a resource that can be tagged. The resource ARN must be URL-encoded.

", + "documentation":"

[Required] The ARN of a resource that can be tagged.

", "location":"uri", "locationName":"resource_arn" }, @@ -7090,7 +7104,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

The ARNs of network load balancers of the VPC targeted by the VPC link. The network load balancers must be owned by the same AWS account of the API owner.

" + "documentation":"

The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

" }, "status":{ "shape":"VpcLinkStatus", @@ -7105,7 +7119,7 @@ "documentation":"

The collection of tags. Each tag element is associated with a given resource.

" } }, - "documentation":"

A API Gateway VPC link for a RestApi to access resources in an Amazon Virtual Private Cloud (VPC).

To enable access to a resource in an Amazon Virtual Private Cloud through Amazon API Gateway, you, as an API developer, create a VpcLink resource targeted for one or more network load balancers of the VPC and then integrate an API method with a private integration that uses the VpcLink. The private integration has an integration type of HTTP or HTTP_PROXY and has a connection type of VPC_LINK. The integration uses the connectionId property to identify the VpcLink used.

" + "documentation":"

An API Gateway VPC link for a RestApi to access resources in an Amazon Virtual Private Cloud (VPC).

To enable access to a resource in an Amazon Virtual Private Cloud through Amazon API Gateway, you, as an API developer, create a VpcLink resource targeted for one or more network load balancers of the VPC and then integrate an API method with a private integration that uses the VpcLink. The private integration has an integration type of HTTP or HTTP_PROXY and has a connection type of VPC_LINK. The integration uses the connectionId property to identify the VpcLink used.

" }, "VpcLinkStatus":{ "type":"string", diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 11018a2e731b..a4248b8ea9f9 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 02c0a4831457..932f250c5b4c 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json index 82402aa956df..2c9c70ffb46b 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json @@ -667,6 +667,31 @@ } ], "documentation" : "

Deletes a VPC link.

" }, + "ExportApi" : { + "name" : "ExportApi", + "http" : { + "method" : "GET", + "requestUri" : "/v2/apis/{apiId}/exports/{specification}", + "responseCode" : 200 + }, + "input" : { + "shape" : "ExportApiRequest" + }, + "output" : { + "shape" : "ExportApiResponse", + "documentation" : "

Success

" + }, + "errors" : [ { + "shape" : "NotFoundException", + "documentation" : "

The resource specified in the request was not found.

" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" + }, { + "shape" : "BadRequestException", + "documentation" : "

One of the parameters in the request is invalid.

" + } ] + }, "GetApi" : { "name" : "GetApi", "http" : { @@ -2735,7 +2760,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -2823,7 +2848,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -2920,7 +2945,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -4215,6 +4240,63 @@ "documentation" : "

Represents an endpoint type.

", "enum" : [ "REGIONAL", "EDGE" ] }, + "ExportApiRequest" : { + "type" : "structure", + "members" : { + "ApiId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "apiId", + "documentation" : "

The API identifier.

" + }, + "ExportVersion" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "exportVersion", + "documentation" : "

The version of the API Gateway export algorithm. API Gateway uses the latest version by default. Currently, the only supported version is 1.0.

" + }, + "IncludeExtensions" : { + "shape" : "__boolean", + "location" : "querystring", + "locationName" : "includeExtensions", + "documentation" : "

Specifies whether to include API Gateway extensions in the exported API definition. API Gateway extensions are included by default.

" + }, + "OutputType" : { + "shape" : "__string", + "enum" : ["YAML", "JSON"], + "location" : "querystring", + "locationName" : "outputType", + "documentation" : "

The output type of the exported definition file. Valid values are JSON and YAML.

" + }, + "Specification" : { + "shape" : "__string", + "enum" : ["OAS30"], + "location" : "uri", + "locationName" : "specification", + "documentation" : "

The version of the API specification to use. OAS30, for OpenAPI 3.0, is the only supported value.

" + }, + "StageName" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "stageName", + "documentation" : "

The name of the API stage to export. If you don't specify this property, a representation of the latest API configuration is exported.

" + } + }, + "required" : [ "Specification", "OutputType", "ApiId" ] + }, + "ExportApiResponse" : { + "type" : "structure", + "members" : { + "body":{ + "shape":"ExportedApi" + } + }, + "payload":"body" + }, + "ExportedApi":{ + "type":"blob", + "documentation" : "

Represents an exported definition of an API in a particular output format, for example, YAML. The API is serialized to the requested specification, for example, OpenAPI 3.0.

" + }, "GetApiMappingRequest" : { "type" : "structure", "members" : { @@ -4758,7 +4840,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -5558,7 +5640,7 @@ "shape" : "__string", "location" : "querystring", "locationName" : "basepath", - "documentation" : "

Specifies how to interpret the base path of the API during import. Valid values are ignore, prepend, and split. The default value is ignore. To learn more, see Set the OpenAPI basePath Property. Supported only for HTTP APIs.

" + "documentation" : "

Specifies how to interpret the base path of the API during import. Valid values are ignore, prepend, and split. The default value is ignore. To learn more, see Set the OpenAPI basePath Property. Supported only for HTTP APIs.

" }, "Body" : { "shape" : "__string", @@ -5569,7 +5651,7 @@ "shape" : "__boolean", "location" : "querystring", "locationName" : "failOnWarnings", - "documentation" : "

Specifies whether to rollback the API creation (true) or not (false) when a warning is encountered. The default value is false.

" + "documentation" : "

Specifies whether to rollback the API creation when a warning is encountered. By default, API creation continues if a warning is encountered.

" } }, "documentation" : "

", @@ -5724,7 +5806,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -6003,7 +6085,7 @@ "shape" : "__string", "location" : "querystring", "locationName" : "basepath", - "documentation" : "

Specifies how to interpret the base path of the API during import. Valid values are ignore, prepend, and split. The default value is ignore. To learn more, see Set the OpenAPI basePath Property. Supported only for HTTP APIs.

" + "documentation" : "

Specifies how to interpret the base path of the API during import. Valid values are ignore, prepend, and split. The default value is ignore. To learn more, see Set the OpenAPI basePath Property. Supported only for HTTP APIs.

" }, "Body" : { "shape" : "__string", @@ -6014,7 +6096,7 @@ "shape" : "__boolean", "location" : "querystring", "locationName" : "failOnWarnings", - "documentation" : "

Specifies whether to rollback the API creation (true) or not (false) when a warning is encountered. The default value is false.

" + "documentation" : "

Specifies whether to rollback the API creation when a warning is encountered. By default, API creation continues if a warning is encountered.

" } }, "documentation" : "

", @@ -7176,7 +7258,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -7269,7 +7351,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", @@ -7366,7 +7448,7 @@ "IntegrationUri" : { "shape" : "UriWithLengthBetween1And2048", "locationName" : "integrationUri", - "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" + "documentation" : "

For a Lambda integration, specify the URI of a Lambda function.

For an HTTP integration, specify a fully-qualified URL.

For an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances to identify resources. You can use query parameters to target specific resources. To learn more, see DiscoverInstances. For private integrations, all resources must be owned by the same AWS account.

" }, "PassthroughBehavior" : { "shape" : "PassthroughBehavior", diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 216eef851e8c..c69767007b57 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfig/src/main/resources/codegen-resources/paginators-1.json b/services/appconfig/src/main/resources/codegen-resources/paginators-1.json index 6a79ddb03f74..c76cf37f2ccb 100644 --- a/services/appconfig/src/main/resources/codegen-resources/paginators-1.json +++ b/services/appconfig/src/main/resources/codegen-resources/paginators-1.json @@ -24,6 +24,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListHostedConfigurationVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/appconfig/src/main/resources/codegen-resources/service-2.json b/services/appconfig/src/main/resources/codegen-resources/service-2.json index e4353582bc71..a94258d35834 100644 --- a/services/appconfig/src/main/resources/codegen-resources/service-2.json +++ b/services/appconfig/src/main/resources/codegen-resources/service-2.json @@ -75,6 +75,25 @@ ], "documentation":"

For each application, you define one or more environments. An environment is a logical deployment group of AppConfig targets, such as applications in a Beta or Production environment. You can also define environments for application subcomponents such as the Web, Mobile and Back-end components for your application. You can configure Amazon CloudWatch alarms for each environment. The system monitors alarms during a configuration deployment. If an alarm is triggered, the system rolls back the configuration.

" }, + "CreateHostedConfigurationVersion":{ + "name":"CreateHostedConfigurationVersion", + "http":{ + "method":"POST", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":201 + }, + "input":{"shape":"CreateHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"PayloadTooLargeException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Create a new configuration in the AppConfig configuration store.

" + }, "DeleteApplication":{ "name":"DeleteApplication", "http":{ @@ -137,6 +156,21 @@ ], "documentation":"

Delete an environment. Deleting an environment does not delete a configuration from a host.

" }, + "DeleteHostedConfigurationVersion":{ + "name":"DeleteHostedConfigurationVersion", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":204 + }, + "input":{"shape":"DeleteHostedConfigurationVersionRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete a version of a configuration from the AppConfig configuration store.

" + }, "GetApplication":{ "name":"GetApplication", "http":{ @@ -165,10 +199,9 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"BadRequestException"} ], - "documentation":"

Retrieve information about a configuration.

" + "documentation":"

Receive information about a configuration.

AWS AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration.

To avoid excess charges, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. This value must be saved on your client. Subsequent calls to GetConfiguration must pass this value by using the ClientConfigurationVersion parameter.

" }, "GetConfigurationProfile":{ "name":"GetConfigurationProfile", @@ -234,6 +267,22 @@ ], "documentation":"

Retrieve information about an environment. An environment is a logical deployment group of AppConfig applications, such as applications in a Production environment or in an EU_Region environment. Each configuration deployment targets an environment. You can enable one or more Amazon CloudWatch alarms for an environment. If an alarm is triggered during a deployment, AppConfig rolls back the configuration.

" }, + "GetHostedConfigurationVersion":{ + "name":"GetHostedConfigurationVersion", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions/{VersionNumber}", + "responseCode":200 + }, + "input":{"shape":"GetHostedConfigurationVersionRequest"}, + "output":{"shape":"HostedConfigurationVersion"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get information about a specific configuration version.

" + }, "ListApplications":{ "name":"ListApplications", "http":{ @@ -312,6 +361,22 @@ ], "documentation":"

List the environments for an application.

" }, + "ListHostedConfigurationVersions":{ + "name":"ListHostedConfigurationVersions", + "http":{ + "method":"GET", + "requestUri":"/applications/{ApplicationId}/configurationprofiles/{ConfigurationProfileId}/hostedconfigurationversions", + "responseCode":200 + }, + "input":{"shape":"ListHostedConfigurationVersionsRequest"}, + "output":{"shape":"HostedConfigurationVersions"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

View a list of configurations stored in the AppConfig configuration store by version.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -521,7 +586,14 @@ "error":{"httpStatusCode":400}, "exception":true }, - "Blob":{"type":"blob"}, + "Blob":{ + "type":"blob", + "sensitive":true + }, + "BytesMeasure":{ + "type":"string", + "enum":["KILOBYTES"] + }, "Configuration":{ "type":"structure", "members":{ @@ -568,7 +640,7 @@ "documentation":"

The URI location of the configuration.

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -652,8 +724,7 @@ "required":[ "ApplicationId", "Name", - "LocationUri", - "RetrievalRoleArn" + "LocationUri" ], "members":{ "ApplicationId":{ @@ -675,7 +746,7 @@ "documentation":"

A URI to locate the configuration. You can specify a Systems Manager (SSM) document, an SSM Parameter Store parameter, or an Amazon S3 object. For an SSM document, specify either the document name in the format ssm-document://<Document_name> or the Amazon Resource Name (ARN). For a parameter, specify either the parameter name in the format ssm-parameter://<Parameter_name> or the ARN. For an Amazon S3 object, specify the URI in the following format: s3://<bucket>/<objectKey> . Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -764,6 +835,53 @@ } } }, + "CreateHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "Content", + "ContentType" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

The content of the configuration or the configuration data.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

", + "location":"header", + "locationName":"Content-Type" + }, + "LatestVersionNumber":{ + "shape":"Integer", + "documentation":"

An optional locking token used to prevent race conditions from overwriting configuration updates when creating a new version. To ensure your data is not overwritten when creating multiple hosted configuration versions in rapid succession, specify the version of the latest hosted configuration version.

", + "box":true, + "location":"header", + "locationName":"Latest-Version-Number" + } + }, + "payload":"Content" + }, "DeleteApplicationRequest":{ "type":"structure", "required":["ApplicationId"], @@ -830,6 +948,34 @@ } } }, + "DeleteHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The version number to delete.

", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, "Deployment":{ "type":"structure", "members":{ @@ -1011,7 +1157,7 @@ }, "DeploymentStrategyId":{ "type":"string", - "pattern":"([a-z0-9]{4,7}|arn:aws.*)" + "pattern":"(^[a-z0-9]{4,7}$|^AppConfig\\.[A-Za-z0-9]{9,40}$)" }, "DeploymentStrategyList":{ "type":"list", @@ -1140,6 +1286,7 @@ } } }, + "Float":{"type":"float"}, "GetApplicationRequest":{ "type":"structure", "required":["ApplicationId"], @@ -1208,7 +1355,7 @@ }, "ClientConfigurationVersion":{ "shape":"Version", - "documentation":"

The configuration version returned in the most recent GetConfiguration response.

", + "documentation":"

The configuration version returned in the most recent GetConfiguration response.

AWS AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration.

To avoid excess charges, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. This value must be saved on your client. Subsequent calls to GetConfiguration must pass this value by using the ClientConfigurationVersion parameter.

For more information about working with configurations, see Retrieving the Configuration in the AWS AppConfig User Guide.

", "location":"querystring", "locationName":"client_configuration_version" } @@ -1276,6 +1423,34 @@ } } }, + "GetHostedConfigurationVersionRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId", + "VersionNumber" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The version.

", + "location":"uri", + "locationName":"VersionNumber" + } + } + }, "GrowthFactor":{ "type":"float", "max":100.0, @@ -1288,6 +1463,89 @@ "EXPONENTIAL" ] }, + "HostedConfigurationVersion":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"header", + "locationName":"Application-Id" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"header", + "locationName":"Configuration-Profile-Id" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The configuration version.

", + "location":"header", + "locationName":"Version-Number" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

", + "location":"header", + "locationName":"Description" + }, + "Content":{ + "shape":"Blob", + "documentation":"

The content of the configuration or the configuration data.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

", + "location":"header", + "locationName":"Content-Type" + } + }, + "payload":"Content" + }, + "HostedConfigurationVersionSummary":{ + "type":"structure", + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

" + }, + "VersionNumber":{ + "shape":"Integer", + "documentation":"

The configuration version.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the configuration.

" + }, + "ContentType":{ + "shape":"StringWithLengthBetween1And255", + "documentation":"

A standard MIME type describing the format of the configuration content. For more information, see Content-Type.

" + } + }, + "documentation":"

Information about the configuration.

" + }, + "HostedConfigurationVersionSummaryList":{ + "type":"list", + "member":{"shape":"HostedConfigurationVersionSummary"} + }, + "HostedConfigurationVersions":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"HostedConfigurationVersionSummaryList", + "documentation":"

The elements from this collection.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of items to return. Use this token to get the next set of results.

" + } + } + }, "Id":{ "type":"string", "pattern":"[a-z0-9]{4,7}" @@ -1427,6 +1685,40 @@ } } }, + "ListHostedConfigurationVersionsRequest":{ + "type":"structure", + "required":[ + "ApplicationId", + "ConfigurationProfileId" + ], + "members":{ + "ApplicationId":{ + "shape":"Id", + "documentation":"

The application ID.

", + "location":"uri", + "locationName":"ApplicationId" + }, + "ConfigurationProfileId":{ + "shape":"Id", + "documentation":"

The configuration profile ID.

", + "location":"uri", + "locationName":"ConfigurationProfileId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

", + "box":true, + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to start the list. Use this token to get the next set of results.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -1457,7 +1749,7 @@ "documentation":"

ARN of the Amazon CloudWatch alarm.

" }, "AlarmRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

ARN of an IAM role for AppConfig to monitor AlarmArn.

" } }, @@ -1479,6 +1771,18 @@ "max":2048, "min":1 }, + "PayloadTooLargeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "Measure":{"shape":"BytesMeasure"}, + "Limit":{"shape":"Float"}, + "Size":{"shape":"Float"} + }, + "documentation":"

The configuration size is too large.

", + "error":{"httpStatusCode":413}, + "exception":true + }, "Percentage":{ "type":"float", "max":100.0, @@ -1510,6 +1814,21 @@ } } }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^((arn):(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):(iam)::\\d{12}:role[/].*)$" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The number of hosted configuration versions exceeds the limit for the AppConfig configuration store. Delete one or more versions and try again.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "StartDeploymentRequest":{ "type":"structure", "required":[ @@ -1587,7 +1906,13 @@ "StringWithLengthBetween0And32768":{ "type":"string", "max":32768, - "min":0 + "min":0, + "sensitive":true + }, + "StringWithLengthBetween1And255":{ + "type":"string", + "max":255, + "min":1 }, "StringWithLengthBetween1And64":{ "type":"string", @@ -1713,7 +2038,7 @@ "documentation":"

A description of the configuration profile.

" }, "RetrievalRoleArn":{ - "shape":"Arn", + "shape":"RoleArn", "documentation":"

The ARN of an IAM role with permission to access the configuration at the specified LocationUri.

" }, "Validators":{ @@ -1862,9 +2187,9 @@ }, "Version":{ "type":"string", - "max":128, + "max":1024, "min":1 } }, - "documentation":"AWS AppConfig

Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices.

To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.

During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version.

AppConfig supports multiple use cases. Here are some examples.

  • Application tuning: Use AppConfig to carefully introduce changes to your application that can only be tested with production traffic.

  • Feature toggle: Use AppConfig to turn on new features that require a timely deployment, such as a product launch or announcement.

  • User membership: Use AppConfig to allow premium subscribers to access paid content.

  • Operational issues: Use AppConfig to reduce stress on your application when a dependency or other external factor impacts the system.

This reference is intended to be used with the AWS AppConfig User Guide.

" + "documentation":"AWS AppConfig

Use AWS AppConfig, a capability of AWS Systems Manager, to create, manage, and quickly deploy application configurations. AppConfig supports controlled deployments to applications of any size and includes built-in validation checks and monitoring. You can use AppConfig with applications hosted on Amazon EC2 instances, AWS Lambda, containers, mobile applications, or IoT devices.

To prevent errors when deploying application configurations, especially for production systems where a simple typo could cause an unexpected outage, AppConfig includes validators. A validator provides a syntactic or semantic check to ensure that the configuration you want to deploy works as intended. To validate your application configuration data, you provide a schema or a Lambda function that runs against the configuration. The configuration deployment or update can only proceed when the configuration data is valid.

During a configuration deployment, AppConfig monitors the application to ensure that the deployment is successful. If the system encounters an error, AppConfig rolls back the change to minimize impact for your application users. You can configure a deployment strategy for each application or environment that includes deployment criteria, including velocity, bake time, and alarms to monitor. Similar to error monitoring, if a deployment triggers an alarm, AppConfig automatically rolls back to the previous version.

AppConfig supports multiple use cases. Here are some examples.

  • Application tuning: Use AppConfig to carefully introduce changes to your application that can only be tested with production traffic.

  • Feature toggle: Use AppConfig to turn on new features that require a timely deployment, such as a product launch or announcement.

  • Allow list: Use AppConfig to allow premium subscribers to access paid content.

  • Operational issues: Use AppConfig to reduce stress on your application when a dependency or other external factor impacts the system.

This reference is intended to be used with the AWS AppConfig User Guide.

" } diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 518c480ad9eb..3497a0ecb2b9 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json index 21a638e7fc8f..fa279ec978c4 100644 --- a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

To create a scaling policy or update an existing one, see PutScalingPolicy.

" + "documentation":"

Deletes the specified scaling policy for an Application Auto Scaling scalable target.

Deleting a step scaling policy deletes the underlying alarm action, but does not delete the CloudWatch alarm associated with the scaling policy, even if it no longer has an associated action.

For more information, see Delete a Step Scaling Policy and Delete a Target Tracking Scaling Policy in the Application Auto Scaling User Guide.

" }, "DeleteScheduledAction":{ "name":"DeleteScheduledAction", @@ -59,7 +59,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deregisters an Application Auto Scaling scalable target.

Deregistering a scalable target deletes the scaling policies that are associated with it.

To create a scalable target or update an existing one, see RegisterScalableTarget.

" + "documentation":"

Deregisters an Application Auto Scaling scalable target when you have finished using it. To see which resources have been registered, use DescribeScalableTargets.

Deregistering a scalable target deletes the scaling policies and the scheduled actions that are associated with it.

" }, "DescribeScalableTargets":{ "name":"DescribeScalableTargets", @@ -75,7 +75,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

" + "documentation":"

Gets information about the scalable targets in the specified namespace.

You can filter the results using ResourceIds and ScalableDimension.

" }, "DescribeScalingActivities":{ "name":"DescribeScalingActivities", @@ -91,7 +91,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

Scaling activities are triggered by CloudWatch alarms that are associated with scaling policies. To view the scaling policies for a service namespace, see DescribeScalingPolicies. To create a scaling policy or update an existing one, see PutScalingPolicy.

" + "documentation":"

Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks.

You can filter the results using ResourceId and ScalableDimension.

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -108,7 +108,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

To create a scaling policy or update an existing one, see PutScalingPolicy. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

" + "documentation":"

Describes the Application Auto Scaling scaling policies for the specified service namespace.

You can filter the results using ResourceId, ScalableDimension, and PolicyNames.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

" }, "DescribeScheduledActions":{ "name":"DescribeScheduledActions", @@ -124,7 +124,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

To create a scheduled action or update an existing one, see PutScheduledAction. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

" + "documentation":"

Describes the Application Auto Scaling scheduled actions for the specified service namespace.

You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames parameters.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

" }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -142,7 +142,7 @@ {"shape":"FailedResourceAccessException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

Learn more about how to work with scaling policies in the Application Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scaling policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you have registered the resource as a scalable target.

Multiple scaling policies can be in force at the same time for the same scalable target. You can have one or more target tracking scaling policies, one or more step scaling policies, or both. However, there is a chance that multiple policies could conflict, instructing the scalable target to scale out or in at the same time. Application Auto Scaling gives precedence to the policy that provides the largest capacity for both scale out and scale in. For example, if one policy increases capacity by 3, another policy increases capacity by 200 percent, and the current capacity is 10, Application Auto Scaling uses the policy with the highest calculated capacity (200% of 10 = 20) and scales out to 30.

We recommend caution, however, when using target tracking scaling policies with step scaling policies because conflicts between these policies can cause undesirable behavior. For example, if the step scaling policy initiates a scale-in activity before the target tracking policy is ready to scale in, the scale-in activity will not be blocked. After the scale-in activity completes, the target tracking policy could instruct the scalable target to scale out again.

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to execute scaling policies. Any scaling policies that were specified for the scalable target are deleted.

" }, "PutScheduledAction":{ "name":"PutScheduledAction", @@ -159,7 +159,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

Learn more about how to work with scheduled actions in the Application Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you have registered the resource as a scalable target.

When start and end times are specified with a recurring schedule using a cron expression or rates, they form the boundaries of when the recurring action starts and stops.

To update a scheduled action, specify the parameters that you want to change. If you don't specify start and end times, the old values are deleted.

For more information, see Scheduled Scaling in the Application Auto Scaling User Guide.

If a scalable target is deregistered, the scalable target is no longer available to run scheduled actions. Any scheduled actions that were specified for the scalable target are deleted.

" }, "RegisterScalableTarget":{ "name":"RegisterScalableTarget", @@ -175,7 +175,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling will not scale capacity to values that are outside of this range.

To update a scalable target, specify the parameter that you want to change as well as the following parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalableTargets.

If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.

" + "documentation":"

Registers or updates a scalable target.

A scalable target is a resource that Application Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the combination of resource ID, scalable dimension, and namespace.

When you register a new scalable target, you must specify values for minimum and maximum capacity. Application Auto Scaling scaling policies will not scale capacity to values that are outside of this range.

After you register a scalable target, you do not need to register it again to use other Application Auto Scaling operations. To see which resources have been registered, use DescribeScalableTargets. You can also view the scaling policies for a service namespace by using DescribeScalingPolicies. If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget.

To update a scalable target, specify the parameters that you want to change. Include the parameters that identify the scalable target: resource ID, scalable dimension, and namespace. Any parameters that you don't specify are not changed by this update request.

" } }, "shapes":{ @@ -247,7 +247,7 @@ "documentation":"

The unit of the metric.

" } }, - "documentation":"

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling.

To create your customized metric specification:

  • Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics in the Amazon CloudWatch User Guide.

  • Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases.

For more information about CloudWatch, see Amazon CloudWatch Concepts.

" + "documentation":"

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling.

For information about the available metrics for a service, see AWS Services That Publish CloudWatch Metrics in the Amazon CloudWatch User Guide.

To create your customized metric specification:

  • Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics in the Amazon CloudWatch User Guide.

  • Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases, and increase when capacity decreases.

For more information about CloudWatch, see Amazon CloudWatch Concepts.

" }, "DeleteScalingPolicyRequest":{ "type":"structure", @@ -264,15 +264,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" } } }, @@ -292,7 +292,7 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ScheduledActionName":{ "shape":"ResourceIdMaxLen1600", @@ -300,11 +300,11 @@ }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" } } }, @@ -323,15 +323,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" } } }, @@ -346,15 +346,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceIds":{ "shape":"ResourceIdsMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MaxResults":{ "shape":"MaxResults", @@ -385,15 +385,15 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MaxResults":{ "shape":"MaxResults", @@ -428,15 +428,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MaxResults":{ "shape":"MaxResults", @@ -471,15 +471,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. If you specify a scalable dimension, you must also specify a resource ID.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MaxResults":{ "shape":"MaxResults", @@ -600,7 +600,9 @@ "ECSServiceAverageMemoryUtilization", "AppStreamAverageCapacityUtilization", "ComprehendInferenceUtilization", - "LambdaProvisionedConcurrencyUtilization" + "LambdaProvisionedConcurrencyUtilization", + "CassandraReadCapacityUtilization", + "CassandraWriteCapacityUtilization" ] }, "MetricUnit":{"type":"string"}, @@ -639,7 +641,7 @@ "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot Fleet request or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

  • app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN

  • targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.

" } }, - "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

" + "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.

Only the AWS services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building Dashboards with CloudWatch in the Application Auto Scaling User Guide.

" }, "PutScalingPolicyRequest":{ "type":"structure", @@ -656,19 +658,19 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, or AWS Lambda

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

" + "documentation":"

The policy type. This parameter is required if you are creating a scaling policy.

The following policy types are supported:

TargetTrackingScaling—Not supported for Amazon EMR

StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, or Amazon Keyspaces (for Apache Cassandra).

For more information, see Target Tracking Scaling Policies and Step Scaling Policies in the Application Auto Scaling User Guide.

" }, "StepScalingPolicyConfiguration":{ "shape":"StepScalingPolicyConfiguration", @@ -705,35 +707,35 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

For examples of using these expressions, see Scheduled Scaling in the Application Auto Scaling User Guide.

" }, "ScheduledActionName":{ "shape":"ScheduledActionName", - "documentation":"

The name of the scheduled action.

" + "documentation":"

The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "StartTime":{ "shape":"TimestampType", - "documentation":"

The date and time for the scheduled action to start.

" + "documentation":"

The date and time for this scheduled action to start.

" }, "EndTime":{ "shape":"TimestampType", - "documentation":"

The date and time for the scheduled action to end.

" + "documentation":"

The date and time for the recurring schedule to end.

" }, "ScalableTargetAction":{ "shape":"ScalableTargetAction", - "documentation":"

The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" + "documentation":"

The new minimum and maximum capacity. You can set both values or just one. At the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" } } }, @@ -752,27 +754,27 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource. For a resource provided by your own application or service, use custom-resource instead.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value to scale to in response to a scale-in event. MinCapacity is required to register a scalable target.

" + "documentation":"

The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand.

This parameter is required if you are registering a scalable target. For Lambda provisioned concurrency, the minimum value allowed is 0. For all other resources, the minimum value allowed is 1.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The maximum value to scale to in response to a scale-out event. MaxCapacity is required to register a scalable target.

" + "documentation":"

The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand.

This parameter is required if you are registering a scalable target.

" }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For Amazon EMR, this parameter is required, and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" + "documentation":"

This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM Roles.

" }, "SuspendedState":{ "shape":"SuspendedState", @@ -820,7 +822,9 @@ "sagemaker:variant:DesiredInstanceCount", "custom-resource:ResourceType:Property", "comprehend:document-classifier-endpoint:DesiredInferenceUnits", - "lambda:function:ProvisionedConcurrency" + "lambda:function:ProvisionedConcurrency", + "cassandra:table:ReadCapacityUnits", + "cassandra:table:WriteCapacityUnits" ] }, "ScalableTarget":{ @@ -837,23 +841,23 @@ "members":{ "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource, or a custom-resource.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value to scale to in response to a scale-in event.

" + "documentation":"

The minimum value to scale to in response to a scale-in activity.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The maximum value to scale to in response to a scale-out event.

" + "documentation":"

The maximum value to scale to in response to a scale-out activity.

" }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", @@ -872,7 +876,7 @@ "members":{ "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum capacity.

" + "documentation":"

The minimum capacity.

For Lambda provisioned concurrency, the minimum value allowed is 0. For all other resources, the minimum value allowed is 1.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", @@ -908,15 +912,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource, or a custom-resource.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "Description":{ "shape":"XmlString", @@ -987,15 +991,15 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource, or a custom-resource.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "PolicyType":{ "shape":"PolicyType", @@ -1042,19 +1046,19 @@ }, "ServiceNamespace":{ "shape":"ServiceNamespace", - "documentation":"

The namespace of the AWS service that provides the resource or custom-resource for a resource provided by your own application or service. For more information, see AWS Service Namespaces in the Amazon Web Services General Reference.

" + "documentation":"

The namespace of the AWS service that provides the resource, or a custom-resource.

" }, "Schedule":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time, in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

" + "documentation":"

The schedule for this action. The following formats are supported:

  • At expressions - \"at(yyyy-mm-ddThh:mm:ss)\"

  • Rate expressions - \"rate(value unit)\"

  • Cron expressions - \"cron(fields)\"

At expressions are useful for one-time schedules. Specify the time in UTC.

For rate expressions, value is a positive integer and unit is minute | minutes | hour | hours | day | days.

For more information about cron expressions, see Cron Expressions in the Amazon CloudWatch Events User Guide.

For examples of using these expressions, see Scheduled Scaling in the Application Auto Scaling User Guide.

" }, "ResourceId":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

" + "documentation":"

The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.

  • ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp.

  • Spot Fleet request - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.

  • EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.

  • AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet.

  • DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table.

  • DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index.

  • Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster.

  • Amazon SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.

  • Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository.

  • Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.

  • Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1.

  • Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable.

" }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

" + "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

  • ecs:service:DesiredCount - The desired task count of an ECS service.

  • ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet request.

  • elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group.

  • appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet.

  • dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table.

  • dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table.

  • dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index.

  • dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index.

  • rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.

  • sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an Amazon SageMaker model endpoint variant.

  • custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service.

  • comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint.

  • lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function.

  • cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table.

  • cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table.

" }, "StartTime":{ "shape":"TimestampType", @@ -1066,7 +1070,7 @@ }, "ScalableTargetAction":{ "shape":"ScalableTargetAction", - "documentation":"

The new minimum and maximum capacity. You can set both values or just one. During the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" + "documentation":"

The new minimum and maximum capacity. You can set both values or just one. At the scheduled time, if the current capacity is below the minimum capacity, Application Auto Scaling scales out to the minimum capacity. If the current capacity is above the maximum capacity, Application Auto Scaling scales in to the maximum capacity.

" }, "CreationTime":{ "shape":"TimestampType", @@ -1097,7 +1101,8 @@ "sagemaker", "custom-resource", "comprehend", - "lambda" + "lambda", + "cassandra" ] }, "StepAdjustment":{ @@ -1114,10 +1119,10 @@ }, "ScalingAdjustment":{ "shape":"ScalingAdjustment", - "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current scalable dimension while a negative number removes from the current scalable dimension.

" + "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

" } }, - "documentation":"

Represents a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

" + "documentation":"

Represents a step adjustment for a StepScalingPolicyConfiguration. Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

" }, "StepAdjustments":{ "type":"list", @@ -1128,19 +1133,19 @@ "members":{ "AdjustmentType":{ "shape":"AdjustmentType", - "documentation":"

Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute number or a percentage of the current capacity.

" + "documentation":"

Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute number or a percentage of the current capacity.

AdjustmentType is required if you are adding a new step scaling policy configuration.

" }, "StepAdjustments":{ "shape":"StepAdjustments", - "documentation":"

A set of adjustments that enable you to scale based on the size of the alarm breach.

" + "documentation":"

A set of adjustments that enable you to scale based on the size of the alarm breach.

At least one step adjustment is required if you are adding a new step scaling policy configuration.

" }, "MinAdjustmentMagnitude":{ "shape":"MinAdjustmentMagnitude", - "documentation":"

The minimum number to adjust your scalable dimension as a result of a scaling activity. If the adjustment type is PercentChangeInCapacity, the scaling policy changes the scalable dimension of the scalable target by this amount.

For example, suppose that you create a step scaling policy to scale out an Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling scales out the service by 2 tasks.

" + "documentation":"

The minimum value to scale by when scaling by percentages. For example, suppose that you create a step scaling policy to scale out an Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the service has 4 tasks and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Application Auto Scaling scales out the service by 2 tasks.

Valid only if the adjustment type is PercentChangeInCapacity.

" }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes where previous trigger-related scaling activities can influence future scaling events.

For scale-out policies, while the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out. For example, an alarm triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, the scaling activity completes successfully, and a cooldown period of 5 minutes starts. During the cooldown period, if the alarm triggers the same policy again but at a more aggressive step adjustment to scale out the service by 3 tasks, the 2 tasks that were added in the previous scale-out event are considered part of that capacity and only 1 additional task is added to the desired count.

For scale-in policies, the cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" + "documentation":"

The amount of time, in seconds, to wait for a previous scaling activity to take effect.

With scale-out policies, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the cooldown time. While the cooldown period is in effect, capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity. For example, when an alarm triggers a step scaling policy to increase the capacity by 2, the scaling activity completes successfully, and a cooldown period starts. If the alarm triggers again during the cooldown period but at a more aggressive step adjustment of 3, the previous increase of 2 is considered part of the current capacity. Therefore, only 1 is added to the capacity.

With scale-in policies, the intention is to scale in conservatively to protect your application’s availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the cooldown period after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the cooldown period for the scale-in activity stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

" }, "MetricAggregationType":{ "shape":"MetricAggregationType", @@ -1185,15 +1190,15 @@ }, "ScaleOutCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scale-out activity completes before another scale-out activity can start.

While the cooldown period is in effect, the capacity that has been added by the previous scale-out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

" + "documentation":"

The amount of time, in seconds, to wait for a previous scale-out activity to take effect.

With the scale-out cooldown period, the intention is to continuously (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target tracking scaling policy, it starts to calculate the cooldown time. While the scale-out cooldown period is in effect, the capacity added by the initiating scale-out activity is calculated as part of the desired capacity for the next scale-out activity.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

" }, "ScaleInCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scale-in activity completes before another scale in activity can start.

The cooldown period is used to block subsequent scale-in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale-out policy during the cooldown period after a scale-in, Application Auto Scaling scales out your scalable target immediately.

" + "documentation":"

The amount of time, in seconds, after a scale-in activity completes before another scale-in activity can start.

With the scale-in cooldown period, the intention is to scale in conservatively to protect your application's availability, so scale-in activities are blocked until the cooldown period has expired. However, if another alarm triggers a scale-out activity during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, the scale-in cooldown period stops and doesn't complete.

Application Auto Scaling provides a default value of 300 for the following scalable targets:

  • ECS services

  • Spot Fleet requests

  • EMR clusters

  • AppStream 2.0 fleets

  • Aurora DB clusters

  • Amazon SageMaker endpoint variants

  • Custom resources

For all other scalable targets, the default value is 0:

  • DynamoDB tables

  • DynamoDB global secondary indexes

  • Amazon Comprehend document classification endpoints

  • Lambda provisioned concurrency

  • Amazon Keyspaces tables

" }, "DisableScaleIn":{ "shape":"DisableScaleIn", - "documentation":"

Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable resource. The default value is false.

" + "documentation":"

Indicates whether scale in by the target tracking scaling policy is disabled. If the value is true, scale in is disabled and the target tracking scaling policy won't remove capacity from the scalable target. Otherwise, scale in is enabled and the target tracking scaling policy can remove capacity from the scalable target. The default value is false.

" } }, "documentation":"

Represents a target tracking scaling policy configuration to use with Application Auto Scaling.

" @@ -1212,5 +1217,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

  • Amazon Comprehend document classification endpoints

  • AWS Lambda function provisioned concurrency

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget action for any Application Auto Scaling scalable target. You can suspend and resume, individually or in combination, scale-out activities triggered by a scaling policy, scale-in activities triggered by a scaling policy, and scheduled scaling.

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" + "documentation":"

With Application Auto Scaling, you can configure automatic scaling for the following resources:

  • Amazon ECS services

  • Amazon EC2 Spot Fleet requests

  • Amazon EMR clusters

  • Amazon AppStream 2.0 fleets

  • Amazon DynamoDB tables and global secondary indexes throughput capacity

  • Amazon Aurora Replicas

  • Amazon SageMaker endpoint variants

  • Custom resources provided by your own applications or services

  • Amazon Comprehend document classification endpoints

  • AWS Lambda function provisioned concurrency

  • Amazon Keyspaces (for Apache Cassandra) tables

API Summary

The Application Auto Scaling service API includes three key sets of actions:

  • Register and manage scalable targets - Register AWS or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets.

  • Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history.

  • Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling.

To learn more about Application Auto Scaling, including information about granting IAM users required permissions for Application Auto Scaling actions, see the Application Auto Scaling User Guide.

" } diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 50c6113c08e7..e6a6eb07a420 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 3199433ce38f..8b100c7ce9b9 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index f26446eb08b7..d10f44e3aacf 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/appmesh/src/main/resources/codegen-resources/paginators-1.json b/services/appmesh/src/main/resources/codegen-resources/paginators-1.json index ca51591f058a..ac64b5684dab 100644 --- a/services/appmesh/src/main/resources/codegen-resources/paginators-1.json +++ b/services/appmesh/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "ListGatewayRoutes": { + "input_token": "nextToken", + "limit_key": "limit", + "output_token": "nextToken", + "result_key": "gatewayRoutes" + }, "ListMeshes": { "input_token": "nextToken", "limit_key": "limit", @@ -18,6 +24,12 @@ "output_token": "nextToken", "result_key": "tags" }, + "ListVirtualGateways": { + "input_token": "nextToken", + "limit_key": "limit", + "output_token": "nextToken", + "result_key": "virtualGateways" + }, "ListVirtualNodes": { "input_token": "nextToken", "limit_key": "limit", diff --git 
a/services/appmesh/src/main/resources/codegen-resources/service-2.json b/services/appmesh/src/main/resources/codegen-resources/service-2.json index dbef33f43d56..677662813455 100644 --- a/services/appmesh/src/main/resources/codegen-resources/service-2.json +++ b/services/appmesh/src/main/resources/codegen-resources/service-2.json @@ -11,8 +11,50 @@ "signingName": "appmesh", "uid": "appmesh-2019-01-25" }, - "documentation": "

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and\n control microservices. App Mesh standardizes how your microservices communicate, giving you\n end-to-end visibility and helping to ensure high availability for your applications.

\n

App Mesh gives you consistent visibility and network traffic controls for every\n microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS,\n Kubernetes on AWS, and Amazon EC2.

\n \n

App Mesh supports microservice applications that use service discovery naming for their\n components. For more information about service discovery on Amazon ECS, see Service Discovery in the\n Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns and\n coredns are supported. For more information, see DNS\n for Services and Pods in the Kubernetes documentation.

\n
", + "documentation": "

AWS App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and\n control microservices. App Mesh standardizes how your microservices communicate, giving you\n end-to-end visibility and helping to ensure high availability for your applications.

\n

App Mesh gives you consistent visibility and network traffic controls for every\n microservice in an application. You can use App Mesh with AWS Fargate, Amazon ECS, Amazon EKS,\n Kubernetes on AWS, and Amazon EC2.

\n \n

App Mesh supports microservice applications that use service discovery naming for their\n components. For more information about service discovery on Amazon ECS, see Service Discovery in the Amazon Elastic Container Service Developer Guide. Kubernetes\n kube-dns and coredns are supported. For more information,\n see DNS\n for Services and Pods in the Kubernetes documentation.

\n
", "operations": { + "CreateGatewayRoute": { + "name": "CreateGatewayRoute", + "http": { + "method": "PUT", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", + "responseCode": 200 + }, + "input": { + "shape": "CreateGatewayRouteInput" + }, + "output": { + "shape": "CreateGatewayRouteOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "LimitExceededException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Creates a gateway route.

\n

A gateway route is attached to a virtual gateway and routes traffic to an existing\n virtual service. If a route matches a request, it can distribute traffic to a target virtual service.

\n

For more information about gateway routes, see Gateway routes.

", + "idempotent": true + }, "CreateMesh": { "name": "CreateMesh", "http": { @@ -52,7 +94,7 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Creates a service mesh. A service mesh is a logical boundary for network traffic between\n the services that reside within it.

\n

After you create your service mesh, you can create virtual services, virtual nodes,\n virtual routers, and routes to distribute traffic between the applications in your\n mesh.

", + "documentation": "

Creates a service mesh.

\n

A service mesh is a logical boundary for network traffic between services that are\n represented by resources within the mesh. After you create your service mesh, you can\n create virtual services, virtual nodes, virtual routers, and routes to distribute traffic\n between the applications in your mesh.

\n

For more information about service meshes, see Service meshes.

", "idempotent": true }, "CreateRoute": { @@ -94,7 +136,49 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Creates a route that is associated with a virtual router.

\n

You can use the prefix parameter in your route specification for path-based\n routing of requests. For example, if your virtual service name is\n my-service.local and you want the route to match requests to\n my-service.local/metrics, your prefix should be\n /metrics.

\n

If your route matches a request, you can distribute traffic to one or more target\n virtual nodes with relative weighting.

\n

For more information about routes, see Routes.

", + "documentation": "

Creates a route that is associated with a virtual router.

\n

You can route several different protocols and define a retry policy for a route.\n Traffic can be routed to one or more virtual nodes.

\n

For more information about routes, see Routes.

", + "idempotent": true + }, + "CreateVirtualGateway": { + "name": "CreateVirtualGateway", + "http": { + "method": "PUT", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateways", + "responseCode": 200 + }, + "input": { + "shape": "CreateVirtualGatewayInput" + }, + "output": { + "shape": "CreateVirtualGatewayOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "LimitExceededException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Creates a virtual gateway.

\n

A virtual gateway allows resources outside your mesh to communicate to resources that\n are inside your mesh. The virtual gateway represents an Envoy proxy running in an Amazon ECS\n task, in a Kubernetes service, or on an Amazon EC2 instance. Unlike a virtual node, which\n represents an Envoy running with an application, a virtual gateway represents Envoy deployed by itself.

\n

For more information about virtual gateways, see Virtual gateways.

", "idempotent": true }, "CreateVirtualNode": { @@ -136,7 +220,7 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Creates a virtual node within a service mesh.

\n

A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS\n service or a Kubernetes deployment. When you create a virtual node, you can specify the\n service discovery information for your task group.

\n

Any inbound traffic that your virtual node expects should be specified as a\n listener. Any outbound traffic that your virtual node expects to reach\n should be specified as a backend.

\n

The response metadata for your new virtual node contains the arn that is\n associated with the virtual node. Set this value (either the full ARN or the truncated\n resource name: for example, mesh/default/virtualNode/simpleapp) as the\n APPMESH_VIRTUAL_NODE_NAME environment variable for your task group's Envoy\n proxy container in your task definition or pod spec. This is then mapped to the\n node.id and node.cluster Envoy parameters.

\n \n

If you require your Envoy stats or tracing to use a different name, you can override\n the node.cluster value that is set by\n APPMESH_VIRTUAL_NODE_NAME with the\n APPMESH_VIRTUAL_NODE_CLUSTER environment variable.

\n
\n

For more information about virtual nodes, see Virtual Nodes.

", + "documentation": "

Creates a virtual node within a service mesh.

\n

A virtual node acts as a logical pointer to a particular task group, such as an Amazon ECS\n service or a Kubernetes deployment. When you create a virtual node, you can specify the\n service discovery information for your task group, and whether the proxy running in a task\n group will communicate with other proxies using Transport Layer Security (TLS).

\n

You define a listener for any inbound traffic that your virtual node\n expects. Any virtual service that your virtual node expects to communicate to is specified\n as a backend.

\n

The response metadata for your new virtual node contains the arn that is\n associated with the virtual node. Set this value (either the full ARN or the truncated\n resource name: for example, mesh/default/virtualNode/simpleapp) as the\n APPMESH_VIRTUAL_NODE_NAME environment variable for your task group's Envoy\n proxy container in your task definition or pod spec. This is then mapped to the\n node.id and node.cluster Envoy parameters.

\n \n

If you require your Envoy stats or tracing to use a different name, you can override\n the node.cluster value that is set by\n APPMESH_VIRTUAL_NODE_NAME with the\n APPMESH_VIRTUAL_NODE_CLUSTER environment variable.

\n
\n

For more information about virtual nodes, see Virtual nodes.

", "idempotent": true }, "CreateVirtualRouter": { @@ -178,7 +262,7 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Creates a virtual router within a service mesh.

\n

Any inbound traffic that your virtual router expects should be specified as a\n listener.

\n

Virtual routers handle traffic for one or more virtual services within your mesh. After\n you create your virtual router, create and associate routes for your virtual router that\n direct incoming requests to different virtual nodes.

\n

For more information about virtual routers, see Virtual Routers.

", + "documentation": "

Creates a virtual router within a service mesh.

\n

Specify a listener for any inbound traffic that your virtual router\n receives. Create a virtual router for each protocol and port that you need to route.\n Virtual routers handle traffic for one or more virtual services within your mesh. After you\n create your virtual router, create and associate routes for your virtual router that direct\n incoming requests to different virtual nodes.

\n

For more information about virtual routers, see Virtual routers.

", "idempotent": true }, "CreateVirtualService": { @@ -220,7 +304,46 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Creates a virtual service within a service mesh.

\n

A virtual service is an abstraction of a real service that is provided by a virtual node\n directly or indirectly by means of a virtual router. Dependent services call your virtual\n service by its virtualServiceName, and those requests are routed to the\n virtual node or virtual router that is specified as the provider for the virtual\n service.

\n

For more information about virtual services, see Virtual Services.

", + "documentation": "

Creates a virtual service within a service mesh.

\n

A virtual service is an abstraction of a real service that is provided by a virtual node\n directly or indirectly by means of a virtual router. Dependent services call your virtual\n service by its virtualServiceName, and those requests are routed to the\n virtual node or virtual router that is specified as the provider for the virtual\n service.

\n

For more information about virtual services, see Virtual services.

", + "idempotent": true + }, + "DeleteGatewayRoute": { + "name": "DeleteGatewayRoute", + "http": { + "method": "DELETE", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteGatewayRouteInput" + }, + "output": { + "shape": "DeleteGatewayRouteOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ResourceInUseException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Deletes an existing gateway route.

", "idempotent": true }, "DeleteMesh": { @@ -301,6 +424,45 @@ "documentation": "

Deletes an existing route.

", "idempotent": true }, + "DeleteVirtualGateway": { + "name": "DeleteVirtualGateway", + "http": { + "method": "DELETE", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteVirtualGatewayInput" + }, + "output": { + "shape": "DeleteVirtualGatewayOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ResourceInUseException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Deletes an existing virtual gateway. You cannot delete a virtual gateway if any gateway\n routes are associated with it.

", + "idempotent": true + }, "DeleteVirtualNode": { "name": "DeleteVirtualNode", "http": { @@ -405,6 +567,9 @@ { "shape": "NotFoundException" }, + { + "shape": "ResourceInUseException" + }, { "shape": "ServiceUnavailableException" }, @@ -415,6 +580,41 @@ "documentation": "

Deletes an existing virtual service.

", "idempotent": true }, + "DescribeGatewayRoute": { + "name": "DescribeGatewayRoute", + "http": { + "method": "GET", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeGatewayRouteInput" + }, + "output": { + "shape": "DescribeGatewayRouteOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Describes an existing gateway route.

" + }, "DescribeMesh": { "name": "DescribeMesh", "http": { @@ -485,6 +685,41 @@ ], "documentation": "

Describes an existing route.

" }, + "DescribeVirtualGateway": { + "name": "DescribeVirtualGateway", + "http": { + "method": "GET", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeVirtualGatewayInput" + }, + "output": { + "shape": "DescribeVirtualGatewayOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Describes an existing virtual gateway.

" + }, "DescribeVirtualNode": { "name": "DescribeVirtualNode", "http": { @@ -590,6 +825,41 @@ ], "documentation": "

Describes an existing virtual service.

" }, + "ListGatewayRoutes": { + "name": "ListGatewayRoutes", + "http": { + "method": "GET", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", + "responseCode": 200 + }, + "input": { + "shape": "ListGatewayRoutesInput" + }, + "output": { + "shape": "ListGatewayRoutesOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Returns a list of existing gateway routes that are associated with a virtual\n gateway.

" + }, "ListMeshes": { "name": "ListMeshes", "http": { @@ -695,18 +965,18 @@ ], "documentation": "

List the tags for an App Mesh resource.

" }, - "ListVirtualNodes": { - "name": "ListVirtualNodes", + "ListVirtualGateways": { + "name": "ListVirtualGateways", "http": { "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualNodes", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateways", "responseCode": 200 }, "input": { - "shape": "ListVirtualNodesInput" + "shape": "ListVirtualGatewaysInput" }, "output": { - "shape": "ListVirtualNodesOutput" + "shape": "ListVirtualGatewaysOutput" }, "errors": [ { @@ -728,20 +998,20 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Returns a list of existing virtual nodes.

" + "documentation": "

Returns a list of existing virtual gateways in a service mesh.

" }, - "ListVirtualRouters": { - "name": "ListVirtualRouters", + "ListVirtualNodes": { + "name": "ListVirtualNodes", "http": { "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualRouters", + "requestUri": "/v20190125/meshes/{meshName}/virtualNodes", "responseCode": 200 }, "input": { - "shape": "ListVirtualRoutersInput" + "shape": "ListVirtualNodesInput" }, "output": { - "shape": "ListVirtualRoutersOutput" + "shape": "ListVirtualNodesOutput" }, "errors": [ { @@ -763,20 +1033,55 @@ "shape": "TooManyRequestsException" } ], - "documentation": "

Returns a list of existing virtual routers in a service mesh.

" + "documentation": "

Returns a list of existing virtual nodes.

" }, - "ListVirtualServices": { - "name": "ListVirtualServices", + "ListVirtualRouters": { + "name": "ListVirtualRouters", "http": { "method": "GET", - "requestUri": "/v20190125/meshes/{meshName}/virtualServices", + "requestUri": "/v20190125/meshes/{meshName}/virtualRouters", "responseCode": 200 }, "input": { - "shape": "ListVirtualServicesInput" + "shape": "ListVirtualRoutersInput" }, "output": { - "shape": "ListVirtualServicesOutput" + "shape": "ListVirtualRoutersOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Returns a list of existing virtual routers in a service mesh.

" + }, + "ListVirtualServices": { + "name": "ListVirtualServices", + "http": { + "method": "GET", + "requestUri": "/v20190125/meshes/{meshName}/virtualServices", + "responseCode": 200 + }, + "input": { + "shape": "ListVirtualServicesInput" + }, + "output": { + "shape": "ListVirtualServicesOutput" }, "errors": [ { @@ -875,6 +1180,48 @@ "documentation": "

Deletes specified tags from a resource.

", "idempotent": true }, + "UpdateGatewayRoute": { + "name": "UpdateGatewayRoute", + "http": { + "method": "PUT", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateGatewayRouteInput" + }, + "output": { + "shape": "UpdateGatewayRouteOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "LimitExceededException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Updates an existing gateway route that is associated to a specified virtual gateway in a\n service mesh.

", + "idempotent": true + }, "UpdateMesh": { "name": "UpdateMesh", "http": { @@ -956,6 +1303,48 @@ "documentation": "

Updates an existing route for a specified service mesh and virtual router.

", "idempotent": true }, + "UpdateVirtualGateway": { + "name": "UpdateVirtualGateway", + "http": { + "method": "PUT", + "requestUri": "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateVirtualGatewayInput" + }, + "output": { + "shape": "UpdateVirtualGatewayOutput" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "LimitExceededException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "documentation": "

Updates an existing virtual gateway in a specified service mesh.

", + "idempotent": true + }, "UpdateVirtualNode": { "name": "UpdateVirtualNode", "http": { @@ -1125,7 +1514,7 @@ }, "httpRetryEvents": { "shape": "HttpRetryPolicyEvents", - "documentation": "

Specify at least one of the following values.

\n
    \n
  • \n

    \n server-error – HTTP status codes 500, 501,\n 502, 503, 504, 505, 506, 507, 508, 510, and 511

    \n
  • \n
  • \n

    \n gateway-error – HTTP status codes 502,\n 503, and 504

    \n
  • \n
  • \n

    \n client-error – HTTP status code 409

    \n
  • \n
  • \n

    \n stream-error – Retry on refused\n stream

    \n
  • \n
" + "documentation": "

Specify at least one of the following values.

\n
    \n
  • \n

    \n server-error – HTTP status codes 500, 501,\n 502, 503, 504, 505, 506, 507, 508, 510, and 511

    \n
  • \n
  • \n

    \n gateway-error – HTTP status codes 502,\n 503, and 504

    \n
  • \n
  • \n

    \n client-error – HTTP status code 409

    \n
  • \n
  • \n

    \n stream-error – Retry on refused\n stream

    \n
  • \n
" }, "maxRetries": { "shape": "MaxRetries", @@ -1201,6 +1590,82 @@ }, "documentation": "" }, + "CreateVirtualGatewayInput": { + "type": "structure", + "required": [ + "meshName", + "spec", + "virtualGatewayName" + ], + "members": { + "clientToken": { + "shape": "String", + "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken": true + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to create the virtual gateway in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "spec": { + "shape": "VirtualGatewaySpec", + "documentation": "

The virtual gateway specification to apply.

" + }, + "tags": { + "shape": "TagList", + "documentation": "

Optional metadata that you can apply to the virtual gateway to assist with\n categorization and organization. Each tag consists of a key and an optional value, both of\n which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name to use for the virtual gateway.

" + } + } + }, + "UpdateVirtualGatewayInput": { + "type": "structure", + "required": [ + "meshName", + "spec", + "virtualGatewayName" + ], + "members": { + "clientToken": { + "shape": "String", + "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken": true + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the virtual gateway resides in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "spec": { + "shape": "VirtualGatewaySpec", + "documentation": "

The new virtual gateway specification to apply. This overwrites the existing\n data.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to update.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "ResourceMetadata": { "type": "structure", "required": [ @@ -1227,11 +1692,11 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "uid": { "shape": "String", @@ -1239,7 +1704,7 @@ }, "version": { "shape": "Long", - "documentation": "

The version of the resource. Resources are created at version 1, and this version is\n incremented each time that they're updated.

" + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" } }, "documentation": "

An object that represents metadata for a resource.

" @@ -1322,6 +1787,19 @@ "senderFault": true } }, + "HttpGatewayRouteMatch": { + "type": "structure", + "required": [ + "prefix" + ], + "members": { + "prefix": { + "shape": "String", + "documentation": "

Specifies the path to match requests with. This parameter must always start with\n /, which by itself matches all requests to the virtual service name. You\n can also match for path-based routing of requests. For example, if your virtual service\n name is my-service.local and you want the route to match requests to\n my-service.local/metrics, your prefix should be\n /metrics.

" + } + }, + "documentation": "

An object that represents the criteria for determining a request match.

" + }, "GrpcRouteMetadataList": { "type": "list", "member": { @@ -1358,7 +1836,7 @@ }, "path": { "shape": "String", - "documentation": "

The destination path for the health check request. This value is only used if the specified \n protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" + "documentation": "

The destination path for the health check request. This value is only used if the\n specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" }, "port": { "shape": "PortNumber", @@ -1366,7 +1844,7 @@ }, "protocol": { "shape": "PortProtocol", - "documentation": "

The protocol for the health check request. If you specify grpc, then your service must conform to the GRPC Health Checking Protocol.

" + "documentation": "

The protocol for the health check request. If you specify grpc, then your\n service must conform to the GRPC Health\n Checking Protocol.

" }, "timeoutMillis": { "shape": "HealthCheckTimeoutMillis", @@ -1379,6 +1857,12 @@ }, "documentation": "

An object that represents the health check policy for a virtual node's listener.

" }, + "VirtualGatewayHealthCheckTimeoutMillis": { + "type": "long", + "box": true, + "min": 2000, + "max": 60000 + }, "EgressFilter": { "type": "structure", "required": [ @@ -1408,15 +1892,54 @@ }, "documentation": "

An object that represents a client policy.

" }, + "VirtualGatewayHealthCheckIntervalMillis": { + "type": "long", + "box": true, + "min": 5000, + "max": 300000 + }, "Boolean": { "type": "boolean", "box": true }, + "VirtualGatewaySpec": { + "type": "structure", + "required": [ + "listeners" + ], + "members": { + "backendDefaults": { + "shape": "VirtualGatewayBackendDefaults", + "documentation": "

A reference to an object that represents the defaults for backends.

" + }, + "listeners": { + "shape": "VirtualGatewayListeners", + "documentation": "

The listeners that the mesh endpoint is expected to receive inbound traffic from. You\n can specify one listener.

" + }, + "logging": { + "shape": "VirtualGatewayLogging" + } + }, + "documentation": "

An object that represents the specification of a service mesh resource.

" + }, "HttpRetryPolicyEvent": { "type": "string", "min": 1, "max": 25 }, + "VirtualGatewayFileAccessLog": { + "type": "structure", + "required": [ + "path" + ], + "members": { + "path": { + "shape": "FilePath", + "documentation": "

The file path to write access logs to. You can use /dev/stdout to send\n access logs to standard out and configure your Envoy container to use a log driver, such as\n awslogs, to export the access logs to a log storage service such as Amazon\n CloudWatch Logs. You can also specify a path in the Envoy container's file system to write\n the files to disk.

" + } + }, + "documentation": "

An object that represents an access log file.

" + }, "DescribeVirtualServiceOutput": { "type": "structure", "required": [ @@ -1431,6 +1954,52 @@ "documentation": "", "payload": "virtualService" }, + "CreateGatewayRouteInput": { + "type": "structure", + "required": [ + "gatewayRouteName", + "meshName", + "spec", + "virtualGatewayName" + ], + "members": { + "clientToken": { + "shape": "String", + "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken": true + }, + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name to use for the gateway route.

" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to create the gateway route in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "spec": { + "shape": "GatewayRouteSpec", + "documentation": "

The gateway route specification to apply.

" + }, + "tags": { + "shape": "TagList", + "documentation": "

Optional metadata that you can apply to the gateway route to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to associate the gateway route with. If the virtual\n gateway is in a shared mesh, then you must be the owner of the virtual gateway\n resource.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "CertificateAuthorityArns": { "type": "list", "member": { @@ -1459,20 +2028,73 @@ "max": 1024, "pattern": "((?=^.{1,127}$)^([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9])(.([a-zA-Z0-9_][a-zA-Z0-9-_]{0,61}[a-zA-Z0-9_]|[a-zA-Z0-9]))*$)|(^.$)" }, - "CreateRouteOutput": { + "VirtualGatewayData": { "type": "structure", "required": [ - "route" + "meshName", + "metadata", + "spec", + "status", + "virtualGatewayName" ], "members": { - "route": { - "shape": "RouteData", - "documentation": "

The full description of your mesh following the create call.

" - } - }, - "documentation": "", + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the virtual gateway resides in.

" + }, + "metadata": { + "shape": "ResourceMetadata" + }, + "spec": { + "shape": "VirtualGatewaySpec", + "documentation": "

The specifications of the virtual gateway.

" + }, + "status": { + "shape": "VirtualGatewayStatus", + "documentation": "

The current status of the virtual gateway.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway.

" + } + }, + "documentation": "

An object that represents a virtual gateway returned by a describe operation.

" + }, + "CreateRouteOutput": { + "type": "structure", + "required": [ + "route" + ], + "members": { + "route": { + "shape": "RouteData", + "documentation": "

The full description of your mesh following the create call.

" + } + }, + "documentation": "", "payload": "route" }, + "VirtualGatewayListener": { + "type": "structure", + "required": [ + "portMapping" + ], + "members": { + "healthCheck": { + "shape": "VirtualGatewayHealthCheckPolicy", + "documentation": "

The health check information for the listener.

" + }, + "portMapping": { + "shape": "VirtualGatewayPortMapping", + "documentation": "

The port mapping information for the listener.

" + }, + "tls": { + "shape": "VirtualGatewayListenerTls", + "documentation": "

A reference to an object that represents the Transport Layer Security (TLS) properties for the listener.

" + } + }, + "documentation": "

An object that represents a listener for a virtual gateway.

" + }, "DnsServiceDiscovery": { "type": "structure", "required": [ @@ -1486,6 +2108,37 @@ }, "documentation": "

An object that represents the DNS service discovery information for your virtual\n node.

" }, + "VirtualGatewayPortMapping": { + "type": "structure", + "required": [ + "port", + "protocol" + ], + "members": { + "port": { + "shape": "PortNumber", + "documentation": "

The port used for the port mapping. Specify one protocol.

" + }, + "protocol": { + "shape": "VirtualGatewayPortProtocol", + "documentation": "

The protocol used for the port mapping.

" + } + }, + "documentation": "

An object that represents a port mapping.

" + }, + "DeleteVirtualGatewayOutput": { + "type": "structure", + "required": [ + "virtualGateway" + ], + "members": { + "virtualGateway": { + "shape": "VirtualGatewayData", + "documentation": "

The virtual gateway that was deleted.

" + } + }, + "payload": "virtualGateway" + }, "DeleteRouteInput": { "type": "structure", "required": [ @@ -1502,7 +2155,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -1559,12 +2212,36 @@ "members": { }, "documentation": "" }, + "ListGatewayRoutesLimit": { + "type": "integer", + "box": true, + "min": 1, + "max": 100 + }, "TcpRetryPolicyEvent": { "type": "string", "enum": [ "connection-error" ] }, + "VirtualGatewayListenerTls": { + "type": "structure", + "required": [ + "certificate", + "mode" + ], + "members": { + "certificate": { + "shape": "VirtualGatewayListenerTlsCertificate", + "documentation": "

An object that represents a Transport Layer Security (TLS) certificate.

" + }, + "mode": { + "shape": "VirtualGatewayListenerTlsMode", + "documentation": "

Specify one of the following modes.

\n
    \n
  • \n

    \n STRICT – Listener only accepts connections with TLS\n enabled.

    \n
  • \n
  • \n

    \n PERMISSIVE – Listener accepts connections with or\n without TLS enabled.

    \n
  • \n
  • \n

    \n DISABLED – Listener only accepts connections without\n TLS.

    \n
  • \n
" + } + }, + "documentation": "

An object that represents the Transport Layer Security (TLS) properties for a listener.

" + }, "Backend": { "type": "structure", "members": { @@ -1593,6 +2270,63 @@ }, "documentation": "" }, + "VirtualGatewayListenerTlsFileCertificate": { + "type": "structure", + "required": [ + "certificateChain", + "privateKey" + ], + "members": { + "certificateChain": { + "shape": "FilePath", + "documentation": "

The certificate chain for the certificate.

" + }, + "privateKey": { + "shape": "FilePath", + "documentation": "

The private key for a certificate stored on the file system of the mesh endpoint that\n the proxy is running on.

" + } + }, + "documentation": "

An object that represents a local file certificate.\n The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + }, + "ListGatewayRoutesInput": { + "type": "structure", + "required": [ + "meshName", + "virtualGatewayName" + ], + "members": { + "limit": { + "shape": "ListGatewayRoutesLimit", + "documentation": "

The maximum number of results returned by ListGatewayRoutes in paginated\n output. When you use this parameter, ListGatewayRoutes returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListGatewayRoutes request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListGatewayRoutes returns up to 100 results and a\n nextToken value if applicable.

", + "location": "querystring", + "locationName": "limit" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to list gateway routes in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "nextToken": { + "shape": "String", + "documentation": "

The nextToken value returned from a previous paginated\n ListGatewayRoutes request where limit was used and the results\n exceeded the value of that parameter. Pagination continues from the end of the previous\n results that returned the nextToken value.

", + "location": "querystring", + "locationName": "nextToken" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to list gateway routes in.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "VirtualRouterData": { "type": "structure", "required": [ @@ -1650,6 +2384,47 @@ }, "documentation": "" }, + "VirtualGatewayHealthCheckPolicy": { + "type": "structure", + "required": [ + "healthyThreshold", + "intervalMillis", + "protocol", + "timeoutMillis", + "unhealthyThreshold" + ], + "members": { + "healthyThreshold": { + "shape": "VirtualGatewayHealthCheckThreshold", + "documentation": "

The number of consecutive successful health checks that must occur before declaring the\n listener healthy.

" + }, + "intervalMillis": { + "shape": "VirtualGatewayHealthCheckIntervalMillis", + "documentation": "

The time period in milliseconds between each health check execution.

" + }, + "path": { + "shape": "String", + "documentation": "

The destination path for the health check request. This value is only used if the\n specified protocol is HTTP or HTTP/2. For any other protocol, this value is ignored.

" + }, + "port": { + "shape": "PortNumber", + "documentation": "

The destination port for the health check request. This port must match the port defined\n in the PortMapping for the listener.

" + }, + "protocol": { + "shape": "VirtualGatewayPortProtocol", + "documentation": "

The protocol for the health check request. If you specify grpc, then your\n service must conform to the GRPC Health\n Checking Protocol.

" + }, + "timeoutMillis": { + "shape": "VirtualGatewayHealthCheckTimeoutMillis", + "documentation": "

The amount of time to wait when receiving a response from the health check, in\n milliseconds.

" + }, + "unhealthyThreshold": { + "shape": "VirtualGatewayHealthCheckThreshold", + "documentation": "

The number of consecutive failed health checks that must occur before declaring a\n virtual gateway unhealthy.

" + } + }, + "documentation": "

An object that represents the health check policy for a virtual gateway's\n listener.

" + }, "CreateVirtualRouterInput": { "type": "structure", "required": [ @@ -1671,7 +2446,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -1756,6 +2531,20 @@ "min": 1, "max": 25 }, + "VirtualGatewayListenerTlsCertificate": { + "type": "structure", + "members": { + "acm": { + "shape": "VirtualGatewayListenerTlsAcmCertificate", + "documentation": "

A reference to an object that represents an AWS Certificate Manager (ACM) certificate.

" + }, + "file": { + "shape": "VirtualGatewayListenerTlsFileCertificate", + "documentation": "

A reference to an object that represents a local file certificate.

" + } + }, + "documentation": "

An object that represents a listener's Transport Layer Security (TLS) certificate.

" + }, "ListenerTlsCertificate": { "type": "structure", "members": { @@ -1787,11 +2576,24 @@ "members": { "listeners": { "shape": "VirtualRouterListeners", - "documentation": "

The listeners that the virtual router is expected to receive inbound traffic from.\n You can specify one listener.

" + "documentation": "

The listeners that the virtual router is expected to receive inbound traffic from. You\n can specify one listener.

" } }, "documentation": "

An object that represents the specification of a virtual router.

" }, + "GatewayRouteVirtualService": { + "type": "structure", + "required": [ + "virtualServiceName" + ], + "members": { + "virtualServiceName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual service that traffic is routed to.

" + } + }, + "documentation": "

An object that represents the virtual service that traffic is routed to.

" + }, "VirtualNodeSpec": { "type": "structure", "members": { @@ -1805,7 +2607,7 @@ }, "listeners": { "shape": "Listeners", - "documentation": "

The listener that the virtual node is expected to receive inbound traffic from.\n You can specify one listener.

" + "documentation": "

The listener that the virtual node is expected to receive inbound traffic from. You can\n specify one listener.

" }, "logging": { "shape": "Logging", @@ -1813,7 +2615,7 @@ }, "serviceDiscovery": { "shape": "ServiceDiscovery", - "documentation": "

The service discovery information for the virtual node. If your virtual node does not\n expect ingress traffic, you can omit this parameter. If you specify a listener,\n then you must specify service discovery information.

" + "documentation": "

The service discovery information for the virtual node. If your virtual node does not\n expect ingress traffic, you can omit this parameter. If you specify a\n listener, then you must specify service discovery information.

" } }, "documentation": "

An object that represents the specification of a virtual node.

" @@ -1843,6 +2645,24 @@ "min": 1, "max": 1 }, + "GatewayRouteSpec": { + "type": "structure", + "members": { + "grpcRoute": { + "shape": "GrpcGatewayRoute", + "documentation": "

An object that represents the specification of a gRPC gateway route.

" + }, + "http2Route": { + "shape": "HttpGatewayRoute", + "documentation": "

An object that represents the specification of an HTTP/2 gateway route.

" + }, + "httpRoute": { + "shape": "HttpGatewayRoute", + "documentation": "

An object that represents the specification of an HTTP gateway route.

" + } + }, + "documentation": "

An object that represents a gateway route specification. Specify one gateway route\n type.

" + }, "PortSet": { "type": "list", "member": { @@ -1878,6 +2698,37 @@ "senderFault": true } }, + "VirtualGatewayBackendDefaults": { + "type": "structure", + "members": { + "clientPolicy": { + "shape": "VirtualGatewayClientPolicy", + "documentation": "

A reference to an object that represents a client policy.

" + } + }, + "documentation": "

An object that represents the default properties for a backend.

" + }, + "ListenerTimeout": { + "type": "structure", + "members": { + "grpc": { + "shape": "GrpcTimeout" + }, + "http": { + "shape": "HttpTimeout", + "documentation": "

An object that represents types of timeouts.

" + }, + "http2": { + "shape": "HttpTimeout", + "documentation": "

An object that represents types of timeouts.

" + }, + "tcp": { + "shape": "TcpTimeout", + "documentation": "

An object that represents types of timeouts.

" + } + }, + "documentation": "

An object that represents timeouts for different protocols.

" + }, "MeshList": { "type": "list", "member": { @@ -1889,12 +2740,46 @@ "box": true, "min": 0 }, + "DescribeGatewayRouteInput": { + "type": "structure", + "required": [ + "gatewayRouteName", + "meshName", + "virtualGatewayName" + ], + "members": { + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name of the gateway route to describe.

", + "location": "uri", + "locationName": "gatewayRouteName" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the gateway route resides in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway that the gateway route is associated with.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "TlsValidationContextTrust": { "type": "structure", "members": { "acm": { "shape": "TlsValidationContextAcmTrust", - "documentation": "

A reference to an object that represents a TLS validation context trust for an AWS Certicate Manager (ACM) certificate.

" + "documentation": "

A reference to an object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" }, "file": { "shape": "TlsValidationContextFileTrust", @@ -1921,6 +2806,11 @@ }, "documentation": "

An object that represents a port mapping.

" }, + "VirtualGatewayHealthCheckThreshold": { + "type": "integer", + "min": 2, + "max": 10 + }, "ListVirtualServicesOutput": { "type": "structure", "required": [ @@ -1960,16 +2850,74 @@ "documentation": "

The relative weight of the weighted target.

" } }, - "documentation": "

An object that represents a target and its relative weight. Traffic is distributed across\n targets according to their relative weight. For example, a weighted target with a relative\n weight of 50 receives five times as much traffic as one with a relative weight of\n 10. The total weight for all targets combined must be less than or equal to 100.

" + "documentation": "

An object that represents a target and its relative weight. Traffic is distributed\n across targets according to their relative weight. For example, a weighted target with a\n relative weight of 50 receives five times as much traffic as one with a relative weight of\n 10. The total weight for all targets combined must be less than or equal to 100.

" + }, + "GrpcGatewayRoute": { + "type": "structure", + "required": [ + "action", + "match" + ], + "members": { + "action": { + "shape": "GrpcGatewayRouteAction", + "documentation": "

An object that represents the action to take if a match is determined.

" + }, + "match": { + "shape": "GrpcGatewayRouteMatch", + "documentation": "

An object that represents the criteria for determining a request match.

" + } + }, + "documentation": "

An object that represents a gRPC gateway route.

" + }, + "GatewayRouteData": { + "type": "structure", + "required": [ + "gatewayRouteName", + "meshName", + "metadata", + "spec", + "status", + "virtualGatewayName" + ], + "members": { + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name of the gateway route.

" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the resource resides in.

" + }, + "metadata": { + "shape": "ResourceMetadata" + }, + "spec": { + "shape": "GatewayRouteSpec", + "documentation": "

The specifications of the gateway route.

" + }, + "status": { + "shape": "GatewayRouteStatus", + "documentation": "

The status of the gateway route.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The virtual gateway that the gateway route is associated with.

" + } + }, + "documentation": "

An object that represents a gateway route returned by a describe operation.

" }, "RouteRef": { "type": "structure", "required": [ "arn", + "createdAt", + "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", "routeName", + "version", "virtualRouterName" ], "members": { @@ -1977,22 +2925,34 @@ "shape": "Arn", "documentation": "

The full Amazon Resource Name (ARN) for the route.

" }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, "meshName": { "shape": "ResourceName", "documentation": "

The name of the service mesh that the route resides in.

" }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" }, "routeName": { "shape": "ResourceName", "documentation": "

The name of the route.

" }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, "virtualRouterName": { "shape": "ResourceName", "documentation": "

The virtual router that the route is associated with.

" @@ -2015,7 +2975,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2123,7 +3083,7 @@ "documentation": "

One or more ACM Amazon Resource Name (ARN)s.

" } }, - "documentation": "

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM) certificate.

" + "documentation": "

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" }, "ForbiddenException": { "type": "structure", @@ -2180,6 +3140,28 @@ "documentation": "", "payload": "mesh" }, + "VirtualGatewayClientPolicyTls": { + "type": "structure", + "required": [ + "validation" + ], + "members": { + "enforce": { + "shape": "Boolean", + "box": true, + "documentation": "

Whether the policy is enforced. The default is True if a value isn't\n specified.

" + }, + "ports": { + "shape": "PortSet", + "documentation": "

One or more ports that the policy is enforced for.

" + }, + "validation": { + "shape": "VirtualGatewayTlsValidationContext", + "documentation": "

A reference to an object that represents a TLS validation context.

" + } + }, + "documentation": "

An object that represents a Transport Layer Security (TLS) client policy.

" + }, "EgressFilterType": { "type": "string", "enum": [ @@ -2195,6 +3177,40 @@ "Hostname": { "type": "string" }, + "VirtualGatewayStatus": { + "type": "structure", + "required": [ + "status" + ], + "members": { + "status": { + "shape": "VirtualGatewayStatusCode", + "documentation": "

The current status.

" + } + }, + "documentation": "

An object that represents the status of the mesh resource.

" + }, + "GatewayRouteStatus": { + "type": "structure", + "required": [ + "status" + ], + "members": { + "status": { + "shape": "GatewayRouteStatusCode", + "documentation": "

The current status for the gateway route.

" + } + }, + "documentation": "

An object that represents the current status of a gateway route.

" + }, + "VirtualGatewayListeners": { + "type": "list", + "member": { + "shape": "VirtualGatewayListener" + }, + "min": 0, + "max": 1 + }, "TagResourceInput": { "type": "structure", "required": [ @@ -2215,6 +3231,48 @@ }, "documentation": "" }, + "CreateVirtualGatewayOutput": { + "type": "structure", + "required": [ + "virtualGateway" + ], + "members": { + "virtualGateway": { + "shape": "VirtualGatewayData", + "documentation": "

The full description of your virtual gateway following the create call.

" + } + }, + "payload": "virtualGateway" + }, + "ListVirtualGatewaysOutput": { + "type": "structure", + "required": [ + "virtualGateways" + ], + "members": { + "nextToken": { + "shape": "String", + "documentation": "

The nextToken value to include in a future ListVirtualGateways\n request. When the results of a ListVirtualGateways request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + }, + "virtualGateways": { + "shape": "VirtualGatewayList", + "documentation": "

The list of existing virtual gateways for the specified service mesh.

" + } + } + }, + "VirtualGatewayTlsValidationContext": { + "type": "structure", + "required": [ + "trust" + ], + "members": { + "trust": { + "shape": "VirtualGatewayTlsValidationContextTrust", + "documentation": "

A reference to an object that represents a TLS validation context trust.

" + } + }, + "documentation": "

An object that represents a Transport Layer Security (TLS) validation context.

" + }, "VirtualServiceProvider": { "type": "structure", "members": { @@ -2238,7 +3296,7 @@ }, "methodName": { "shape": "MethodName", - "documentation": "

The method name to match from the request. If you specify a name, you must also specify a serviceName.

" + "documentation": "

The method name to match from the request. If you specify a name, you must also specify\n a serviceName.

" }, "serviceName": { "shape": "ServiceName", @@ -2314,7 +3372,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2377,6 +3435,88 @@ }, "documentation": "

An object that represents a route specification. Specify one route type.

" }, + "GatewayRouteRef": { + "type": "structure", + "required": [ + "arn", + "createdAt", + "gatewayRouteName", + "lastUpdatedAt", + "meshName", + "meshOwner", + "resourceOwner", + "version", + "virtualGatewayName" + ], + "members": { + "arn": { + "shape": "Arn", + "documentation": "

The full Amazon Resource Name (ARN) for the gateway route.

" + }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name of the gateway route.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the resource resides in.

" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + }, + "resourceOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The virtual gateway that the gateway route is associated with.

" + } + }, + "documentation": "

An object that represents a gateway route returned by a list operation.

" + }, + "VirtualGatewayListenerTlsAcmCertificate": { + "type": "structure", + "required": [ + "certificateArn" + ], + "members": { + "certificateArn": { + "shape": "Arn", + "documentation": "

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + } + }, + "documentation": "

An object that represents an AWS Certificate Manager (ACM) certificate.

" + }, + "ListGatewayRoutesOutput": { + "type": "structure", + "required": [ + "gatewayRoutes" + ], + "members": { + "gatewayRoutes": { + "shape": "GatewayRouteList", + "documentation": "

The list of existing gateway routes for the specified service mesh and virtual\n gateway.

" + }, + "nextToken": { + "shape": "String", + "documentation": "

The nextToken value to include in a future ListGatewayRoutes\n request. When the results of a ListGatewayRoutes request exceed\n limit, you can use this value to retrieve the next page of results. This\n value is null when there are no more results to return.

" + } + } + }, "CreateVirtualServiceOutput": { "type": "structure", "required": [ @@ -2417,6 +3557,18 @@ }, "documentation": "

An object that represents a virtual node service provider.

" }, + "HttpTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration" + }, + "perRequest": { + "shape": "Duration" + } + }, + "documentation": "

An object that represents types of timeouts.

" + }, "DeleteVirtualServiceInput": { "type": "structure", "required": [ @@ -2432,7 +3584,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2458,6 +3610,14 @@ }, "documentation": "

An object that represents a Transport Layer Security (TLS) validation context.

" }, + "GatewayRouteStatusCode": { + "type": "string", + "enum": [ + "ACTIVE", + "DELETED", + "INACTIVE" + ] + }, "DeleteVirtualRouterOutput": { "type": "structure", "required": [ @@ -2472,12 +3632,52 @@ "documentation": "", "payload": "virtualRouter" }, + "DescribeVirtualGatewayInput": { + "type": "structure", + "required": [ + "meshName", + "virtualGatewayName" + ], + "members": { + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the gateway route resides in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to describe.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "TagsLimit": { "type": "integer", "box": true, "min": 1, "max": 50 }, + "GrpcGatewayRouteAction": { + "type": "structure", + "required": [ + "target" + ], + "members": { + "target": { + "shape": "GatewayRouteTarget", + "documentation": "

An object that represents the target that traffic is routed to when a request matches the gateway route.

" + } + }, + "documentation": "

An object that represents the action to take if a match is determined.

" + }, "DeleteVirtualNodeOutput": { "type": "structure", "required": [ @@ -2513,7 +3713,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2543,7 +3743,7 @@ }, "mode": { "shape": "ListenerTlsMode", - "documentation": "

Specify one of the following modes.

\n
    \n
  • \n

    \n STRICT – Listener only accepts connections with TLS enabled.

    \n
  • \n
  • \n

    \n PERMISSIVE – Listener accepts connections with or without TLS enabled.

    \n
  • \n
  • \n

    \n DISABLED – Listener only accepts connections without TLS.

    \n
  • \n
" + "documentation": "

Specify one of the following modes.

\n
    \n
  • \n

    \n STRICT – Listener only accepts connections with TLS\n enabled.

    \n
  • \n
  • \n

    \n PERMISSIVE – Listener accepts connections with or\n without TLS enabled.

    \n
  • \n
  • \n

    \n DISABLED – Listener only accepts connections without\n TLS.

    \n
  • \n
" } }, "documentation": "

An object that represents the Transport Layer Security (TLS) properties for a listener.

" @@ -2592,7 +3792,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2635,7 +3835,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2652,6 +3852,29 @@ }, "documentation": "" }, + "HttpGatewayRouteAction": { + "type": "structure", + "required": [ + "target" + ], + "members": { + "target": { + "shape": "GatewayRouteTarget", + "documentation": "

An object that represents the target that traffic is routed to when a request matches the gateway route.

" + } + }, + "documentation": "

An object that represents the action to take if a match is determined.

" + }, + "GrpcGatewayRouteMatch": { + "type": "structure", + "members": { + "serviceName": { + "shape": "ServiceName", + "documentation": "

The fully qualified domain name for the service to match from the request.

" + } + }, + "documentation": "

An object that represents the criteria for determining a request match.

" + }, "ListTagsForResourceInput": { "type": "structure", "required": [ @@ -2687,6 +3910,14 @@ "min": 1, "max": 5 }, + "VirtualGatewayStatusCode": { + "type": "string", + "enum": [ + "ACTIVE", + "DELETED", + "INACTIVE" + ] + }, "ServiceUnavailableException": { "type": "structure", "members": { @@ -2731,7 +3962,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2744,6 +3975,19 @@ }, "documentation": "" }, + "UpdateGatewayRouteOutput": { + "type": "structure", + "required": [ + "gatewayRoute" + ], + "members": { + "gatewayRoute": { + "shape": "GatewayRouteData", + "documentation": "

A full description of the gateway route that was updated.

" + } + }, + "payload": "gatewayRoute" + }, "DescribeRouteInput": { "type": "structure", "required": [ @@ -2760,7 +4004,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2816,6 +4060,25 @@ "tcp" ] }, + "DeleteGatewayRouteOutput": { + "type": "structure", + "required": [ + "gatewayRoute" + ], + "members": { + "gatewayRoute": { + "shape": "GatewayRouteData", + "documentation": "

The gateway route that was deleted.

" + } + }, + "payload": "gatewayRoute" + }, + "VirtualGatewayList": { + "type": "list", + "member": { + "shape": "VirtualGatewayRef" + } + }, "VirtualNodeStatusCode": { "type": "string", "enum": [ @@ -2848,7 +4111,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2933,7 +4196,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -2956,9 +4219,12 @@ "type": "structure", "required": [ "arn", + "createdAt", + "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", + "version", "virtualServiceName" ], "members": { @@ -2966,17 +4232,29 @@ "shape": "Arn", "documentation": "

The full Amazon Resource Name (ARN) for the virtual service.

" }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, "meshName": { "shape": "ResourceName", "documentation": "

The name of the service mesh that the virtual service resides in.

" }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualServiceName": { "shape": "ServiceName", @@ -2985,6 +4263,20 @@ }, "documentation": "

An object that represents a virtual service returned by a list operation.

" }, + "GrpcTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration", + "documentation": "

An object that represents an idle timeout. An idle timeout bounds the amount of time that a connection may be idle. The default value is none.

" + }, + "perRequest": { + "shape": "Duration", + "documentation": "

An object that represents a per request timeout. The default value is 15 seconds. If you set a higher timeout, then make sure that the higher value is set for each App Mesh resource in a conversation. For example, if a virtual node backend uses a virtual router provider to route to another virtual node, then the timeout should be greater than 15 seconds for the source and destination virtual node and the route.

" + } + }, + "documentation": "

An object that represents types of timeouts.

" + }, "VirtualNodeStatus": { "type": "structure", "required": [ @@ -3002,9 +4294,12 @@ "type": "structure", "required": [ "arn", + "createdAt", + "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", + "version", "virtualRouterName" ], "members": { @@ -3012,17 +4307,29 @@ "shape": "Arn", "documentation": "

The full Amazon Resource Name (ARN) for the virtual router.

" }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, "meshName": { "shape": "ResourceName", "documentation": "

The name of the service mesh that the virtual router resides in.

" }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualRouterName": { "shape": "ResourceName", @@ -3099,9 +4406,12 @@ "type": "structure", "required": [ "arn", + "createdAt", + "lastUpdatedAt", "meshName", "meshOwner", "resourceOwner", + "version", "virtualNodeName" ], "members": { @@ -3109,17 +4419,29 @@ "shape": "Arn", "documentation": "

The full Amazon Resource Name (ARN) for the virtual node.

" }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, "meshName": { "shape": "ResourceName", "documentation": "

The name of the service mesh that the virtual node resides in.

" }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" }, "virtualNodeName": { "shape": "ResourceName", @@ -3170,6 +4492,19 @@ }, "documentation": "

An object that represents the action to take if a match is determined.

" }, + "VirtualGatewayTlsValidationContextFileTrust": { + "type": "structure", + "required": [ + "certificateChain" + ], + "members": { + "certificateChain": { + "shape": "FilePath", + "documentation": "

The certificate trust chain for a certificate stored on the file system of the virtual\n node that the proxy is running on.

" + } + }, + "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" + }, "LimitExceededException": { "type": "structure", "members": { @@ -3177,7 +4512,7 @@ "shape": "String" } }, - "documentation": "

You have exceeded a service limit for your account. For more information, see Service\n Limits in the AWS App Mesh User Guide.

", + "documentation": "

You have exceeded a service limit for your account. For more information, see Service\n Limits in the AWS App Mesh User Guide.

", "exception": true, "error": { "code": "LimitExceededException", @@ -3239,7 +4574,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3276,6 +4611,14 @@ }, "documentation": "

An object that represents the AWS Cloud Map attribute information for your virtual\n node.

" }, + "VirtualGatewayListenerTlsMode": { + "type": "string", + "enum": [ + "DISABLED", + "PERMISSIVE", + "STRICT" + ] + }, "VirtualServiceSpec": { "type": "structure", "members": { @@ -3286,6 +4629,29 @@ }, "documentation": "

An object that represents the specification of a virtual service.

" }, + "VirtualGatewayTlsValidationContextAcmTrust": { + "type": "structure", + "required": [ + "certificateAuthorityArns" + ], + "members": { + "certificateAuthorityArns": { + "shape": "VirtualGatewayCertificateAuthorityArns", + "documentation": "

One or more ACM Amazon Resource Name (ARN)s.

" + } + }, + "documentation": "

An object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" + }, + "VirtualGatewayAccessLog": { + "type": "structure", + "members": { + "file": { + "shape": "VirtualGatewayFileAccessLog", + "documentation": "

The file object to send virtual gateway access logs to.

" + } + }, + "documentation": "

The access log configuration for a virtual gateway.

" + }, "MatchRange": { "type": "structure", "required": [ @@ -3334,6 +4700,10 @@ "action": { "shape": "TcpRouteAction", "documentation": "

The action to take if a match is determined.

" + }, + "timeout": { + "shape": "TcpTimeout", + "documentation": "

An object that represents types of timeouts.

" } }, "documentation": "

An object that represents a TCP route type.

" @@ -3344,6 +4714,19 @@ "shape": "VirtualNodeRef" } }, + "UpdateVirtualGatewayOutput": { + "type": "structure", + "required": [ + "virtualGateway" + ], + "members": { + "virtualGateway": { + "shape": "VirtualGatewayData", + "documentation": "

A full description of the virtual gateway that was updated.

" + } + }, + "payload": "virtualGateway" + }, "ListVirtualRoutersInput": { "type": "structure", "required": [ @@ -3364,7 +4747,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3410,7 +4793,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3453,7 +4836,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3466,6 +4849,16 @@ }, "documentation": "" }, + "VirtualGatewayClientPolicy": { + "type": "structure", + "members": { + "tls": { + "shape": "VirtualGatewayClientPolicyTls", + "documentation": "

A reference to an object that represents a Transport Layer Security (TLS) client policy.

" + } + }, + "documentation": "

An object that represents a client policy.

" + }, "ListVirtualNodesLimit": { "type": "integer", "box": true, @@ -3501,6 +4894,16 @@ "Timestamp": { "type": "timestamp" }, + "VirtualGatewayLogging": { + "type": "structure", + "members": { + "accessLog": { + "shape": "VirtualGatewayAccessLog", + "documentation": "

The access log configuration.

" + } + }, + "documentation": "

An object that represents logging information.

" + }, "HeaderMatch": { "type": "string", "min": 1, @@ -3511,6 +4914,19 @@ "min": 12, "max": 12 }, + "GatewayRouteTarget": { + "type": "structure", + "required": [ + "virtualService" + ], + "members": { + "virtualService": { + "shape": "GatewayRouteVirtualService", + "documentation": "

An object that represents a virtual service gateway route target.

" + } + }, + "documentation": "

An object that represents a gateway route target.

" + }, "Duration": { "type": "structure", "members": { @@ -3562,7 +4978,7 @@ "documentation": "

The client request scheme to match on. Specify only one.

" } }, - "documentation": "

An object that represents the requirements for a route to match HTTP requests for a virtual\n router.

" + "documentation": "

An object that represents the requirements for a route to match HTTP requests for a\n virtual router.

" }, "TagRef": { "type": "structure", @@ -3585,30 +5001,51 @@ "type": "structure", "required": [ "arn", + "createdAt", + "lastUpdatedAt", "meshName", "meshOwner", - "resourceOwner" + "resourceOwner", + "version" ], "members": { "arn": { "shape": "Arn", "documentation": "

The full Amazon Resource Name (ARN) of the service mesh.

" }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, "meshName": { "shape": "ResourceName", "documentation": "

The name of the service mesh.

" }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" }, "resourceOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner, or another account that the mesh is shared with. For more information about mesh sharing, see Working with Shared Meshes.

" + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" } }, "documentation": "

An object that represents a service mesh returned by a list operation.

" }, + "ListVirtualGatewaysLimit": { + "type": "integer", + "box": true, + "min": 1, + "max": 100 + }, "MeshStatusCode": { "type": "string", "enum": [ @@ -3645,6 +5082,25 @@ }, "documentation": "

An object that represents a service mesh returned by a describe operation.

" }, + "CreateGatewayRouteOutput": { + "type": "structure", + "required": [ + "gatewayRoute" + ], + "members": { + "gatewayRoute": { + "shape": "GatewayRouteData", + "documentation": "

The full description of your gateway route following the create call.

" + } + }, + "payload": "gatewayRoute" + }, + "GatewayRouteList": { + "type": "list", + "member": { + "shape": "GatewayRouteRef" + } + }, "VirtualRouterStatus": { "type": "structure", "required": [ @@ -3671,6 +5127,33 @@ }, "documentation": "

An object that represents the action to take if a match is determined.

" }, + "DeleteVirtualGatewayInput": { + "type": "structure", + "required": [ + "meshName", + "virtualGatewayName" + ], + "members": { + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to delete the virtual gateway from.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to delete.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "DescribeVirtualNodeInput": { "type": "structure", "required": [ @@ -3686,7 +5169,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3726,6 +5209,10 @@ "shape": "PortMapping", "documentation": "

The port mapping information for the listener.

" }, + "timeout": { + "shape": "ListenerTimeout", + "documentation": "

An object that represents timeouts for different protocols.

" + }, "tls": { "shape": "ListenerTls", "documentation": "

A reference to an object that represents the Transport Layer Security (TLS) properties for a listener.

" @@ -3751,6 +5238,10 @@ "retryPolicy": { "shape": "GrpcRetryPolicy", "documentation": "

An object that represents a retry policy.

" + }, + "timeout": { + "shape": "GrpcTimeout", + "documentation": "

An object that represents types of timeouts.

" } }, "documentation": "

An object that represents a gRPC route type.

" @@ -3770,11 +5261,11 @@ "enforce": { "shape": "Boolean", "box": true, - "documentation": "

Whether the policy is enforced. The default is True, if a value isn't specified.

" + "documentation": "

Whether the policy is enforced. The default is True, if a value isn't\n specified.

" }, "ports": { "shape": "PortSet", - "documentation": "

The range of ports that the policy is enforced for.

" + "documentation": "

One or more ports that the policy is enforced for.

" }, "validation": { "shape": "TlsValidationContext", @@ -3783,6 +5274,20 @@ }, "documentation": "

An object that represents a Transport Layer Security (TLS) client policy.

" }, + "VirtualGatewayTlsValidationContextTrust": { + "type": "structure", + "members": { + "acm": { + "shape": "VirtualGatewayTlsValidationContextAcmTrust", + "documentation": "

A reference to an object that represents a TLS validation context trust for an AWS Certificate Manager (ACM)\n certificate.

" + }, + "file": { + "shape": "VirtualGatewayTlsValidationContextFileTrust", + "documentation": "

An object that represents a TLS validation context trust for a local file.

" + } + }, + "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust.

" + }, "DeleteVirtualServiceOutput": { "type": "structure", "required": [ @@ -3797,6 +5302,14 @@ "documentation": "", "payload": "virtualService" }, + "VirtualGatewayPortProtocol": { + "type": "string", + "enum": [ + "grpc", + "http", + "http2" + ] + }, "VirtualNodeServiceProvider": { "type": "structure", "required": [ @@ -3810,6 +5323,24 @@ }, "documentation": "

An object that represents a virtual node service provider.

" }, + "HttpGatewayRoute": { + "type": "structure", + "required": [ + "action", + "match" + ], + "members": { + "action": { + "shape": "HttpGatewayRouteAction", + "documentation": "

An object that represents the action to take if a match is determined.

" + }, + "match": { + "shape": "HttpGatewayRouteMatch", + "documentation": "

An object that represents the criteria for determining a request match.

" + } + }, + "documentation": "

An object that represents an HTTP gateway route.

" + }, "BackendDefaults": { "type": "structure", "members": { @@ -3833,10 +5364,10 @@ }, "privateKey": { "shape": "FilePath", - "documentation": "

The private key for a certificate stored on the file system of the virtual node that the proxy is running on.

" + "documentation": "

The private key for a certificate stored on the file system of the virtual node that the\n proxy is running on.

" } }, - "documentation": "

An object that represents a local file certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + "documentation": "

An object that represents a local file certificate.\n The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" }, "HttpRetryPolicy": { "type": "structure", @@ -3847,7 +5378,7 @@ "members": { "httpRetryEvents": { "shape": "HttpRetryPolicyEvents", - "documentation": "

Specify at least one of the following values.

\n
    \n
  • \n

    \n server-error – HTTP status codes 500, 501,\n 502, 503, 504, 505, 506, 507, 508, 510, and 511

    \n
  • \n
  • \n

    \n gateway-error – HTTP status codes 502,\n 503, and 504

    \n
  • \n
  • \n

    \n client-error – HTTP status code 409

    \n
  • \n
  • \n

    \n stream-error – Retry on refused\n stream

    \n
  • \n
" + "documentation": "

Specify at least one of the following values.

\n
    \n
  • \n

    \n server-error – HTTP status codes 500, 501,\n 502, 503, 504, 505, 506, 507, 508, 510, and 511

    \n
  • \n
  • \n

    \n gateway-error – HTTP status codes 502,\n 503, and 504

    \n
  • \n
  • \n

    \n client-error – HTTP status code 409

    \n
  • \n
  • \n

    \n stream-error – Retry on refused\n stream

    \n
  • \n
" }, "maxRetries": { "shape": "MaxRetries", @@ -3879,7 +5410,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -3918,6 +5449,82 @@ "senderFault": true } }, + "UpdateGatewayRouteInput": { + "type": "structure", + "required": [ + "gatewayRouteName", + "meshName", + "spec", + "virtualGatewayName" + ], + "members": { + "clientToken": { + "shape": "String", + "documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\nrequest. Up to 36 letters, numbers, hyphens, and underscores are allowed.

", + "idempotencyToken": true + }, + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name of the gateway route to update.

", + "location": "uri", + "locationName": "gatewayRouteName" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the gateway route resides in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "spec": { + "shape": "GatewayRouteSpec", + "documentation": "

The new gateway route specification to apply. This overwrites the existing data.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway that the gateway route is associated with.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, + "ListVirtualGatewaysInput": { + "type": "structure", + "required": [ + "meshName" + ], + "members": { + "limit": { + "shape": "ListVirtualGatewaysLimit", + "documentation": "

The maximum number of results returned by ListVirtualGateways in paginated\n output. When you use this parameter, ListVirtualGateways returns only\n limit results in a single page along with a nextToken response\n element. You can see the remaining results of the initial request by sending another\n ListVirtualGateways request with the returned nextToken value.\n This value can be between 1 and 100. If you don't use this\n parameter, ListVirtualGateways returns up to 100 results and\n a nextToken value if applicable.

", + "location": "querystring", + "locationName": "limit" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to list virtual gateways in.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "nextToken": { + "shape": "String", + "documentation": "

The nextToken value returned from a previous paginated\n ListVirtualGateways request where limit was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", + "location": "querystring", + "locationName": "nextToken" + } + } + }, "PortNumber": { "type": "integer", "min": 1, @@ -3931,7 +5538,7 @@ "members": { "certificateChain": { "shape": "FilePath", - "documentation": "

The certificate trust chain for a certificate stored on the file system of the virtual node that the proxy is running on.

" + "documentation": "

The certificate trust chain for a certificate stored on the file system of the virtual\n node that the proxy is running on.

" } }, "documentation": "

An object that represents a Transport Layer Security (TLS) validation context trust for a local file.

" @@ -3979,7 +5586,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then\n the account that you specify must share the mesh with your account before you can create \n the resource in the service mesh. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -4000,13 +5607,21 @@ }, "virtualRouterName": { "shape": "ResourceName", - "documentation": "

The name of the virtual router in which to create the route. If the virtual router is in a shared mesh,\n then you must be the owner of the virtual router resource.

", + "documentation": "

The name of the virtual router in which to create the route. If the virtual router is in\n a shared mesh, then you must be the owner of the virtual router resource.

", "location": "uri", "locationName": "virtualRouterName" } }, "documentation": "" }, + "VirtualGatewayCertificateAuthorityArns": { + "type": "list", + "member": { + "shape": "Arn" + }, + "min": 1, + "max": 3 + }, "WeightedTargets": { "type": "list", "member": { @@ -4026,6 +5641,15 @@ "String": { "type": "string" }, + "TcpTimeout": { + "type": "structure", + "members": { + "idle": { + "shape": "Duration" + } + }, + "documentation": "

An object that represents types of timeouts.

" + }, "HttpScheme": { "type": "string", "enum": [ @@ -4033,6 +5657,40 @@ "https" ] }, + "DeleteGatewayRouteInput": { + "type": "structure", + "required": [ + "gatewayRouteName", + "meshName", + "virtualGatewayName" + ], + "members": { + "gatewayRouteName": { + "shape": "ResourceName", + "documentation": "

The name of the gateway route to delete.

", + "location": "uri", + "locationName": "gatewayRouteName" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh to delete the gateway route from.

", + "location": "uri", + "locationName": "meshName" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", + "location": "querystring", + "locationName": "meshOwner" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the virtual gateway to delete the route from.

", + "location": "uri", + "locationName": "virtualGatewayName" + } + } + }, "UpdateRouteInput": { "type": "structure", "required": [ @@ -4055,7 +5713,7 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" }, @@ -4096,6 +5754,10 @@ "retryPolicy": { "shape": "HttpRetryPolicy", "documentation": "

An object that represents a retry policy.

" + }, + "timeout": { + "shape": "HttpTimeout", + "documentation": "

An object that represents types of timeouts.

" } }, "documentation": "

An object that represents an HTTP or HTTP/2 route type.

" @@ -4114,13 +5776,61 @@ }, "meshOwner": { "shape": "AccountId", - "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with Shared Meshes.

", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

", "location": "querystring", "locationName": "meshOwner" } }, "documentation": "" }, + "VirtualGatewayRef": { + "type": "structure", + "required": [ + "arn", + "createdAt", + "lastUpdatedAt", + "meshName", + "meshOwner", + "resourceOwner", + "version", + "virtualGatewayName" + ], + "members": { + "arn": { + "shape": "Arn", + "documentation": "

The full Amazon Resource Name (ARN) for the resource.

" + }, + "createdAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was created.

" + }, + "lastUpdatedAt": { + "shape": "Timestamp", + "documentation": "

The Unix epoch timestamp in seconds for when the resource was last updated.

" + }, + "meshName": { + "shape": "ResourceName", + "documentation": "

The name of the service mesh that the resource resides in.

" + }, + "meshOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the service mesh owner. If the account ID is not your own, then it's\n the ID of the account that shared the mesh with your account. For more information about mesh sharing, see Working with shared meshes.

" + }, + "resourceOwner": { + "shape": "AccountId", + "documentation": "

The AWS IAM account ID of the resource owner. If the account ID is not your own, then it's\n the ID of the mesh owner or of another account that the mesh is shared with. For more information about mesh sharing, see Working with shared meshes.

" + }, + "version": { + "shape": "Long", + "documentation": "

The version of the resource. Resources are created at version 1, and this version is incremented each time that they're updated.

" + }, + "virtualGatewayName": { + "shape": "ResourceName", + "documentation": "

The name of the resource.

" + } + }, + "documentation": "

An object that represents a virtual gateway returned by a list operation.

" + }, "MeshSpec": { "type": "structure", "members": { @@ -4131,6 +5841,32 @@ }, "documentation": "

An object that represents the specification of a service mesh.

" }, + "DescribeVirtualGatewayOutput": { + "type": "structure", + "required": [ + "virtualGateway" + ], + "members": { + "virtualGateway": { + "shape": "VirtualGatewayData", + "documentation": "

The full description of your virtual gateway.

" + } + }, + "payload": "virtualGateway" + }, + "DescribeGatewayRouteOutput": { + "type": "structure", + "required": [ + "gatewayRoute" + ], + "members": { + "gatewayRoute": { + "shape": "GatewayRouteData", + "documentation": "

The full description of your gateway route.

" + } + }, + "payload": "gatewayRoute" + }, "ListTagsForResourceOutput": { "type": "structure", "required": [ @@ -4207,7 +5943,7 @@ "members": { "certificateArn": { "shape": "Arn", - "documentation": "

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" + "documentation": "

The Amazon Resource Name (ARN) for the certificate. The certificate must meet specific requirements and you must have proxy authorization enabled. For more information, see Transport Layer Security (TLS).

" } }, "documentation": "

An object that represents an AWS Certificate Manager (ACM) certificate.

" diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 6b3a48ecee97..ab79fc2a6126 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 0def5676b49b..48d092d4ca83 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index b2dfb3719b8e..fdeb80c95704 100644 --- a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -754,7 +754,7 @@ }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type.

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are:

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" }, "status":{ "shape":"ApiCacheStatus", @@ -782,7 +782,15 @@ "R4_XLARGE", "R4_2XLARGE", "R4_4XLARGE", - "R4_8XLARGE" + "R4_8XLARGE", + "SMALL", + "MEDIUM", + "LARGE", + "XLARGE", + "LARGE_2X", + "LARGE_4X", + "LARGE_8X", + "LARGE_12X" ] }, "ApiCachingBehavior":{ @@ -993,7 +1001,7 @@ }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type.

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are:

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" } }, "documentation":"

Represents the input of a CreateApiCache operation.

" @@ -2436,7 +2444,7 @@ }, "dbClusterIdentifier":{ "shape":"String", - "documentation":"

Amazon RDS cluster identifier.

" + "documentation":"

Amazon RDS cluster ARN.

" }, "databaseName":{ "shape":"String", @@ -2745,7 +2753,7 @@ }, "type":{ "shape":"ApiCacheType", - "documentation":"

The cache instance type.

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" + "documentation":"

The cache instance type. Valid values are:

  • SMALL

  • MEDIUM

  • LARGE

  • XLARGE

  • LARGE_2X

  • LARGE_4X

  • LARGE_8X (not available in all regions)

  • LARGE_12X

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

  • T2_SMALL: A t2.small instance type.

  • T2_MEDIUM: A t2.medium instance type.

  • R4_LARGE: A r4.large instance type.

  • R4_XLARGE: A r4.xlarge instance type.

  • R4_2XLARGE: A r4.2xlarge instance type.

  • R4_4XLARGE: A r4.4xlarge instance type.

  • R4_8XLARGE: A r4.8xlarge instance type.

" } }, "documentation":"

Represents the input of a UpdateApiCache operation.

" diff --git a/services/athena/pom.xml b/services/athena/pom.xml index a6ea53d014aa..6239c671b134 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/athena/src/main/resources/codegen-resources/paginators-1.json b/services/athena/src/main/resources/codegen-resources/paginators-1.json index eee98d0ef088..f323d611949f 100644 --- a/services/athena/src/main/resources/codegen-resources/paginators-1.json +++ b/services/athena/src/main/resources/codegen-resources/paginators-1.json @@ -2,23 +2,47 @@ "pagination": { "GetQueryResults": { "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "ListDataCatalogs": { + "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "DataCatalogsSummary" }, - "ListNamedQueries": { + "ListDatabases": { "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "DatabaseList" + }, + "ListNamedQueries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" }, "ListQueryExecutions": { "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "ListTableMetadata": { + "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "TableMetadataList" }, - "ListWorkGroups": { + "ListTagsForResource": { "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Tags" + }, + "ListWorkGroups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" } } -} +} \ No newline at end of file diff --git 
a/services/athena/src/main/resources/codegen-resources/service-2.json b/services/athena/src/main/resources/codegen-resources/service-2.json index 35988536b21c..9a2fdf81391b 100644 --- a/services/athena/src/main/resources/codegen-resources/service-2.json +++ b/services/athena/src/main/resources/codegen-resources/service-2.json @@ -40,6 +40,20 @@ ], "documentation":"

Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. Requires you to have access to the workgroup in which the queries ran. To get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput to get details about named queries.

" }, + "CreateDataCatalog":{ + "name":"CreateDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataCatalogInput"}, + "output":{"shape":"CreateDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same AWS account.

" + }, "CreateNamedQuery":{ "name":"CreateNamedQuery", "http":{ @@ -69,6 +83,20 @@ ], "documentation":"

Creates a workgroup with the specified name.

" }, + "DeleteDataCatalog":{ + "name":"DeleteDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataCatalogInput"}, + "output":{"shape":"DeleteDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Deletes a data catalog.

" + }, "DeleteNamedQuery":{ "name":"DeleteNamedQuery", "http":{ @@ -99,6 +127,35 @@ "documentation":"

Deletes the workgroup with the specified name. The primary workgroup cannot be deleted.

", "idempotent":true }, + "GetDataCatalog":{ + "name":"GetDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataCatalogInput"}, + "output":{"shape":"GetDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Returns the specified data catalog.

" + }, + "GetDatabase":{ + "name":"GetDatabase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDatabaseInput"}, + "output":{"shape":"GetDatabaseOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Returns a database object for the specified database and data catalog.

" + }, "GetNamedQuery":{ "name":"GetNamedQuery", "http":{ @@ -141,6 +198,21 @@ ], "documentation":"

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.

" }, + "GetTableMetadata":{ + "name":"GetTableMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableMetadataInput"}, + "output":{"shape":"GetTableMetadataOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Returns table metadata for the specified catalog, database, and table.

" + }, "GetWorkGroup":{ "name":"GetWorkGroup", "http":{ @@ -155,6 +227,35 @@ ], "documentation":"

Returns information about the workgroup with the specified name.

" }, + "ListDataCatalogs":{ + "name":"ListDataCatalogs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataCatalogsInput"}, + "output":{"shape":"ListDataCatalogsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Lists the data catalogs in the current AWS account.

" + }, + "ListDatabases":{ + "name":"ListDatabases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatabasesInput"}, + "output":{"shape":"ListDatabasesOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Lists the databases in the specified data catalog.

" + }, "ListNamedQueries":{ "name":"ListNamedQueries", "http":{ @@ -167,7 +268,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" + "documentation":"

Provides a list of available query IDs only for queries saved in the specified workgroup. Requires that you have access to the specified workgroup. If a workgroup is not specified, lists the saved queries for the primary workgroup.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, "ListQueryExecutions":{ "name":"ListQueryExecutions", @@ -183,6 +284,21 @@ ], "documentation":"

Provides a list of available query execution IDs for the queries in the specified workgroup. If a workgroup is not specified, returns a list of query execution IDs for the primary workgroup. Requires you to have access to the workgroup in which the queries ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

" }, + "ListTableMetadata":{ + "name":"ListTableMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTableMetadataInput"}, + "output":{"shape":"ListTableMetadataOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"MetadataException"} + ], + "documentation":"

Lists the metadata for the tables in the specified data catalog database.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -196,7 +312,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the tags associated with this workgroup.

" + "documentation":"

Lists the tags associated with an Athena workgroup or data catalog resource.

" }, "ListWorkGroups":{ "name":"ListWorkGroups", @@ -225,7 +341,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran.

For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", + "documentation":"

Runs the SQL query statements contained in the Query. Requires you to have access to the workgroup in which the query ran. Running queries against an external catalog requires GetDataCatalog permission to the catalog. For code samples using the AWS SDK for Java, see Examples and Code Samples in the Amazon Athena User Guide.

", "idempotent":true }, "StopQueryExecution":{ @@ -256,7 +372,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds one or more tags to the resource, such as a workgroup. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize resources (workgroups) in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. For best practices, see AWS Tagging Strategies. The key length is from 1 (minimum) to 128 (maximum) Unicode characters in UTF-8. The tag value length is from 0 (minimum) to 256 (maximum) Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one, separate them by commas.

" + "documentation":"

Adds one or more tags to an Athena resource. A tag is a label that you assign to a resource. In Athena, a resource can be a workgroup or data catalog. Each tag consists of a key and an optional value, both of which you define. For example, you can use tags to categorize Athena workgroups or data catalogs by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups or data catalogs in your account. For best practices, see Tagging Best Practices. Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one tag, separate them by commas.

" }, "UntagResource":{ "name":"UntagResource", @@ -271,7 +387,21 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes one or more tags from the workgroup resource. Takes as an input a list of TagKey Strings separated by commas, and removes their tags at the same time.

" + "documentation":"

Removes one or more tags from a data catalog or workgroup resource.

" + }, + "UpdateDataCatalog":{ + "name":"UpdateDataCatalog", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDataCatalogInput"}, + "output":{"shape":"UpdateDataCatalogOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Updates the data catalog that has the specified name.

" }, "UpdateWorkGroup":{ "name":"UpdateWorkGroup", @@ -346,6 +476,31 @@ "type":"long", "min":10000000 }, + "CatalogNameString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "Column":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + }, + "Type":{ + "shape":"TypeString", + "documentation":"

The data type of the column.

" + }, + "Comment":{ + "shape":"CommentString", + "documentation":"

Optional information about the column.

" + } + }, + "documentation":"

Contains metadata for a column in a table.

" + }, "ColumnInfo":{ "type":"structure", "required":[ @@ -400,6 +555,10 @@ "type":"list", "member":{"shape":"ColumnInfo"} }, + "ColumnList":{ + "type":"list", + "member":{"shape":"Column"} + }, "ColumnNullable":{ "type":"string", "enum":[ @@ -408,6 +567,46 @@ "UNKNOWN" ] }, + "CommentString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "CreateDataCatalogInput":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to create. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The type of data catalog to create: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

A description of the data catalog to be created.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

  • The GLUE type has no parameters.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of comma-separated tags to add to the data catalog that is created.

" + } + } + }, + "CreateDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "CreateNamedQueryInput":{ "type":"structure", "required":[ @@ -470,7 +669,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

One or more tags, separated by commas, that you want to attach to the workgroup as you create it.

" + "documentation":"

A list of comma-separated tags to add to the workgroup that is created.

" } } }, @@ -479,6 +678,81 @@ "members":{ } }, + "DataCatalog":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

An optional description of the data catalog.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The type of data catalog: LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

  • The GLUE type has no parameters.

" + } + }, + "documentation":"

Contains information about a data catalog in an AWS account.

" + }, + "DataCatalogSummary":{ + "type":"structure", + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

The data catalog type.

" + } + }, + "documentation":"

The summary information for the data catalog, which includes its name and type.

" + }, + "DataCatalogSummaryList":{ + "type":"list", + "member":{"shape":"DataCatalogSummary"} + }, + "DataCatalogType":{ + "type":"string", + "enum":[ + "LAMBDA", + "GLUE", + "HIVE" + ] + }, + "Database":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the database.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

An optional description of the database.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A set of custom key/value pairs.

" + } + }, + "documentation":"

Contains metadata information for a database in a data catalog.

" + }, + "DatabaseList":{ + "type":"list", + "member":{"shape":"Database"} + }, "DatabaseString":{ "type":"string", "max":255, @@ -495,6 +769,21 @@ }, "documentation":"

A piece of data (a field in the table).

" }, + "DeleteDataCatalogInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to delete.

" + } + } + }, + "DeleteDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteNamedQueryInput":{ "type":"structure", "required":["NamedQueryId"], @@ -565,6 +854,56 @@ "min":1 }, "ErrorMessage":{"type":"string"}, + "ExpressionString":{ + "type":"string", + "max":256, + "min":0 + }, + "GetDataCatalogInput":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to return.

" + } + } + }, + "GetDataCatalogOutput":{ + "type":"structure", + "members":{ + "DataCatalog":{ + "shape":"DataCatalog", + "documentation":"

The data catalog returned.

" + } + } + }, + "GetDatabaseInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the database to return.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database to return.

" + } + } + }, + "GetDatabaseOutput":{ + "type":"structure", + "members":{ + "Database":{ + "shape":"Database", + "documentation":"

The database returned.

" + } + } + }, "GetNamedQueryInput":{ "type":"structure", "required":["NamedQueryId"], @@ -613,7 +952,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxQueryResults", @@ -634,7 +973,38 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "GetTableMetadataInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the database and table metadata to return.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database that contains the table metadata to return.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which metadata is returned.

" + } + } + }, + "GetTableMetadataOutput":{ + "type":"structure", + "members":{ + "TableMetadata":{ + "shape":"TableMetadata", + "documentation":"

An object that contains table metadata.

" } } }, @@ -681,12 +1051,75 @@ "documentation":"

Indicates that something is wrong with the input to the request. For example, a required parameter may be missing or out of range.

", "exception":true }, + "KeyString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "ListDataCatalogsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxDataCatalogsCount", + "documentation":"

Specifies the maximum number of data catalogs to return.

" + } + } + }, + "ListDataCatalogsOutput":{ + "type":"structure", + "members":{ + "DataCatalogsSummary":{ + "shape":"DataCatalogSummaryList", + "documentation":"

A summary list of data catalogs.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, + "ListDatabasesInput":{ + "type":"structure", + "required":["CatalogName"], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog that contains the databases to return.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxDatabasesCount", + "documentation":"

Specifies the maximum number of results to return.

" + } + } + }, + "ListDatabasesOutput":{ + "type":"structure", + "members":{ + "DatabaseList":{ + "shape":"DatabaseList", + "documentation":"

A list of databases from a data catalog.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListNamedQueriesInput":{ "type":"structure", "members":{ "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxNamedQueriesCount", @@ -694,7 +1127,7 @@ }, "WorkGroup":{ "shape":"WorkGroupName", - "documentation":"

The name of the workgroup from which the named queries are returned. If a workgroup is not specified, the saved queries for the primary workgroup are returned.

" + "documentation":"

The name of the workgroup from which the named queries are being returned. If a workgroup is not specified, the saved queries for the primary workgroup are returned.

" } } }, @@ -707,7 +1140,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" } } }, @@ -716,7 +1149,7 @@ "members":{ "NextToken":{ "shape":"Token", - "documentation":"

The token that specifies where to start pagination if a previous request was truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxQueryExecutionsCount", @@ -724,7 +1157,7 @@ }, "WorkGroup":{ "shape":"WorkGroupName", - "documentation":"

The name of the workgroup from which queries are returned. If a workgroup is not specified, a list of available query execution IDs for the queries in the primary workgroup is returned.

" + "documentation":"

The name of the workgroup from which queries are being returned. If a workgroup is not specified, a list of available query execution IDs for the queries in the primary workgroup is returned.

" } } }, @@ -741,21 +1174,63 @@ } } }, + "ListTableMetadataInput":{ + "type":"structure", + "required":[ + "CatalogName", + "DatabaseName" + ], + "members":{ + "CatalogName":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog for which table metadata should be returned.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database for which table metadata should be returned.

" + }, + "Expression":{ + "shape":"ExpressionString", + "documentation":"

A regex filter that pattern-matches table names. If no expression is supplied, metadata for all tables is listed.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + }, + "MaxResults":{ + "shape":"MaxTableMetadataCount", + "documentation":"

Specifies the maximum number of results to return.

" + } + } + }, + "ListTableMetadataOutput":{ + "type":"structure", + "members":{ + "TableMetadataList":{ + "shape":"TableMetadataList", + "documentation":"

A list of table metadata.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" + } + } + }, "ListTagsForResourceInput":{ "type":"structure", "required":["ResourceARN"], "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Lists the tags for the workgroup resource with the specified ARN.

" + "documentation":"

Lists the tags for the resource with the specified ARN.

" }, "NextToken":{ "shape":"Token", - "documentation":"

The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the workgroup resource with the specified ARN.

" + "documentation":"

The token for the next set of results, or null if there are no additional results for this request, where the request lists the tags for the resource with the specified ARN.

" }, "MaxResults":{ "shape":"MaxTagsCount", - "documentation":"

The maximum number of results to be returned per request that lists the tags for the workgroup resource.

" + "documentation":"

The maximum number of results to be returned per request that lists the tags for the resource.

" } } }, @@ -764,7 +1239,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

The list of tags associated with this workgroup.

" + "documentation":"

The list of tags associated with the specified resource.

" }, "NextToken":{ "shape":"Token", @@ -777,7 +1252,7 @@ "members":{ "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" }, "MaxResults":{ "shape":"MaxWorkGroupsCount", @@ -794,11 +1269,23 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

A token to be used by the next request if this request is truncated.

" + "documentation":"

A token generated by the Athena service that specifies where to continue pagination if a previous request was truncated. To obtain the next set of pages, pass in the NextToken from the response object of the previous page call.

" } } }, "Long":{"type":"long"}, + "MaxDataCatalogsCount":{ + "type":"integer", + "box":true, + "max":50, + "min":2 + }, + "MaxDatabasesCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxNamedQueriesCount":{ "type":"integer", "box":true, @@ -817,6 +1304,12 @@ "max":1000, "min":1 }, + "MaxTableMetadataCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxTagsCount":{ "type":"integer", "box":true, @@ -828,6 +1321,14 @@ "max":50, "min":1 }, + "MetadataException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An exception that Athena received when it called a custom metastore. Occurs if the error is not caused by user input (InvalidRequestException) or from the Athena platform (InternalServerException). For example, if a user-created Lambda function is missing permissions, the Lambda 4XX exception is returned in a MetadataException.

", + "exception":true + }, "NameString":{ "type":"string", "max":128, @@ -879,6 +1380,15 @@ "type":"list", "member":{"shape":"NamedQuery"} }, + "ParametersMap":{ + "type":"map", + "key":{"shape":"KeyString"}, + "value":{"shape":"ParametersMapValue"} + }, + "ParametersMapValue":{ + "type":"string", + "max":51200 + }, "QueryExecution":{ "type":"structure", "members":{ @@ -922,10 +1432,14 @@ "members":{ "Database":{ "shape":"DatabaseString", - "documentation":"

The name of the database.

" + "documentation":"

The name of the database used in the query execution.

" + }, + "Catalog":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog used in the query execution.

" } }, - "documentation":"

The database in which the query execution occurs.

" + "documentation":"

The database and data catalog context in which the query execution occurs.

" }, "QueryExecutionId":{"type":"string"}, "QueryExecutionIdList":{ @@ -987,7 +1501,7 @@ "members":{ "State":{ "shape":"QueryExecutionState", - "documentation":"

The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution.

" + "documentation":"

The state of query execution. QUEUED indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. RUNNING indicates that the query is in execution phase. SUCCEEDED indicates that the query completed without errors. FAILED indicates that the query experienced an error and did not complete processing. CANCELLED indicates that a user input interrupted query execution.

Athena automatically retries your queries in cases of certain transient errors. As a result, you may see the query state transition from RUNNING or FAILED to QUEUED.

" }, "StateChangeReason":{ "shape":"String", @@ -1066,7 +1580,7 @@ "documentation":"

The metadata that describes the column structure and data types of a table of query results.

" } }, - "documentation":"

The metadata and rows that comprise a query result set. The metadata describes the column structure and data types.

" + "documentation":"

The metadata and rows that comprise a query result set. The metadata describes the column structure and data types. To return a ResultSet object, use GetQueryResults.

" }, "ResultSetMetadata":{ "type":"structure", @@ -1076,7 +1590,7 @@ "documentation":"

Information about the columns returned in a query result metadata.

" } }, - "documentation":"

The metadata that describes the column structure and data types of a table of query results.

" + "documentation":"

The metadata that describes the column structure and data types of a table of query results. To return a ResultSetMetadata object, use GetQueryResults.

" }, "Row":{ "type":"structure", @@ -1153,6 +1667,49 @@ } }, "String":{"type":"string"}, + "TableMetadata":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the table.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The time that the table was created.

" + }, + "LastAccessTime":{ + "shape":"Timestamp", + "documentation":"

The last time the table was accessed.

" + }, + "TableType":{ + "shape":"TableTypeString", + "documentation":"

The type of table. In Athena, only EXTERNAL_TABLE is supported.

" + }, + "Columns":{ + "shape":"ColumnList", + "documentation":"

A list of the columns in the table.

" + }, + "PartitionKeys":{ + "shape":"ColumnList", + "documentation":"

A list of the partition keys in the table.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

A set of custom key/value pairs for table properties.

" + } + }, + "documentation":"

Contains metadata for a table.

" + }, + "TableMetadataList":{ + "type":"list", + "member":{"shape":"TableMetadata"} + }, + "TableTypeString":{ + "type":"string", + "max":255 + }, "Tag":{ "type":"structure", "members":{ @@ -1165,7 +1722,7 @@ "documentation":"

A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag values are case-sensitive.

" } }, - "documentation":"

A tag that you can add to a resource. A tag is a label that you assign to an AWS Athena resource (a workgroup). Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize workgroups in Athena, for example, by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups in your account. The maximum tag key length is 128 Unicode characters in UTF-8. The maximum tag value length is 256 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource.

" + "documentation":"

A label that you assign to a resource. In Athena, a resource can be a workgroup or data catalog. Each tag consists of a key and an optional value, both of which you define. For example, you can use tags to categorize Athena workgroups or data catalogs by purpose, owner, or environment. Use a consistent set of tag keys to make it easier to search and filter workgroups or data catalogs in your account. For best practices, see Tagging Best Practices. Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be unique per resource. If you specify more than one tag, separate them by commas.

" }, "TagKey":{ "type":"string", @@ -1189,11 +1746,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Requests that one or more tags are added to the resource (such as a workgroup) for the specified ARN.

" + "documentation":"

Specifies the ARN of the Athena resource (workgroup or data catalog) to which tags are to be added.

" }, "Tags":{ "shape":"TagList", - "documentation":"

One or more tags, separated by commas, to be added to the resource, such as a workgroup.

" + "documentation":"

A collection of one or more tags, separated by commas, to be added to an Athena workgroup or data catalog resource.

" } } }, @@ -1212,6 +1769,7 @@ "documentation":"

The reason for the query throttling, for example, when it exceeds the concurrent query limit.

", "enum":["CONCURRENT_QUERY_LIMIT_EXCEEDED"] }, + "Timestamp":{"type":"timestamp"}, "Token":{ "type":"string", "max":1024, @@ -1226,6 +1784,12 @@ "documentation":"

Indicates that the request was throttled.

", "exception":true }, + "TypeString":{ + "type":"string", + "max":4096, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "UnprocessedNamedQueryId":{ "type":"structure", "members":{ @@ -1279,11 +1843,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

Removes one or more tags from the workgroup resource for the specified ARN.

" + "documentation":"

Specifies the ARN of the resource from which tags are to be removed.

" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

Removes the tags associated with one or more tag keys from the workgroup resource.

" + "documentation":"

A comma-separated list of one or more tag keys whose tags are to be removed from the specified resource.

" } } }, @@ -1292,6 +1856,36 @@ "members":{ } }, + "UpdateDataCatalogInput":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"CatalogNameString", + "documentation":"

The name of the data catalog to update. The catalog name must be unique for the AWS account and can use a maximum of 128 alphanumeric, underscore, at sign, or hyphen characters.

" + }, + "Type":{ + "shape":"DataCatalogType", + "documentation":"

Specifies the type of data catalog to update. Specify LAMBDA for a federated catalog, GLUE for AWS Glue Catalog, or HIVE for an external Hive metastore.

" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"

New or modified text that describes the data catalog.

" + }, + "Parameters":{ + "shape":"ParametersMap", + "documentation":"

Specifies the Lambda function or functions to use for updating the data catalog. This is a mapping whose values depend on the catalog type.

  • For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version.

    metadata-function=lambda_arn, sdk-version=version_number

  • For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both.

    • If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required.

      metadata-function=lambda_arn, record-function=lambda_arn

    • If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function.

      function=lambda_arn

  • The GLUE type has no parameters.

" + } + } + }, + "UpdateDataCatalogOutput":{ + "type":"structure", + "members":{ + } + }, "UpdateWorkGroupInput":{ "type":"structure", "required":["WorkGroup"], @@ -1409,7 +2003,7 @@ }, "WorkGroupName":{ "type":"string", - "pattern":"[a-zA-z0-9._-]{1,128}" + "pattern":"[a-zA-Z0-9._-]{1,128}" }, "WorkGroupState":{ "type":"string", diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 58013117c1cc..fb2a06e842d7 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index b66a03a92b87..84d964fdacca 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -39,7 +39,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "AttachLoadBalancers":{ "name":"AttachLoadBalancers", @@ -56,7 +56,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer or a Network Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

To attach an Application Load Balancer or a Network Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.

Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling registers the running instances with these Classic Load Balancers.

To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers API.

For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "BatchDeleteScheduledAction":{ "name":"BatchDeleteScheduledAction", @@ -92,6 +92,24 @@ ], "documentation":"

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

" }, + "CancelInstanceRefresh":{ + "name":"CancelInstanceRefresh", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelInstanceRefreshType"}, + "output":{ + "shape":"CancelInstanceRefreshAnswer", + "resultWrapper":"CancelInstanceRefreshResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"ActiveInstanceRefreshNotFoundFault"} + ], + "documentation":"

Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, "CompleteLifecycleAction":{ "name":"CompleteLifecycleAction", "http":{ @@ -121,7 +139,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates an Auto Scaling group with the specified name and attributes.

If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

For introductory exercises for creating an Auto Scaling group, see Getting Started with Amazon EC2 Auto Scaling and Tutorial: Set Up a Scaled and Load-Balanced Application in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto Scaling Groups in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateLaunchConfiguration":{ "name":"CreateLaunchConfiguration", @@ -135,7 +153,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates a launch configuration.

If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling Service Quotas in the Amazon EC2 Auto Scaling User Guide.

For more information, see Launch Configurations in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -164,7 +182,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Deletes the specified Auto Scaling group.

If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

To remove instances from the Auto Scaling group before deleting it, call DetachInstances with the list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch replacement instances.

To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup and set the minimum size and desired capacity of the Auto Scaling group to zero.

" + "documentation":"

Deletes the specified Auto Scaling group.

If the group has instances or scaling activities in progress, you must specify the option to force the deletion in order for it to succeed.

If the group has policies, deleting the group deletes the policies, the underlying alarm actions, and any alarm that no longer has an associated action.

To remove instances from the Auto Scaling group before deleting it, call the DetachInstances API with the list of instances and the option to decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch replacement instances.

To terminate all instances before deleting the Auto Scaling group, call the UpdateAutoScalingGroup API and set the minimum size and desired capacity of the Auto Scaling group to zero.

" }, "DeleteLaunchConfiguration":{ "name":"DeleteLaunchConfiguration", @@ -273,7 +291,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the policy adjustment types for use with PutScalingPolicy.

" + "documentation":"

Describes the available adjustment types for Amazon EC2 Auto Scaling scaling policies. These settings apply to step scaling policies and simple scaling policies; they do not apply to target tracking scaling policies.

The following adjustment types are supported:

  • ChangeInCapacity

  • ExactCapacity

  • PercentChangeInCapacity

" }, "DescribeAutoScalingGroups":{ "name":"DescribeAutoScalingGroups", @@ -324,6 +342,23 @@ ], "documentation":"

Describes the notification types that are supported by Amazon EC2 Auto Scaling.

" }, + "DescribeInstanceRefreshes":{ + "name":"DescribeInstanceRefreshes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceRefreshesType"}, + "output":{ + "shape":"DescribeInstanceRefreshesAnswer", + "resultWrapper":"DescribeInstanceRefreshesResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Describes one or more instance refreshes.

You can determine the status of a request by looking at the Status parameter. The following are the possible statuses:

  • Pending - The request was created, but the operation has not started.

  • InProgress - The operation is in progress.

  • Successful - The operation completed successfully.

  • Failed - The operation failed to complete. You can troubleshoot using the status reason and the scaling activities.

  • Cancelling - An ongoing operation is being cancelled. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

  • Cancelled - The operation is cancelled.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, "DescribeLaunchConfigurations":{ "name":"DescribeLaunchConfigurations", "http":{ @@ -402,7 +437,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DescribeLoadBalancerTargetGroups instead.

" + "documentation":"

Describes the load balancers for the specified Auto Scaling group.

This operation describes only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.

" }, "DescribeMetricCollectionTypes":{ "name":"DescribeMetricCollectionTypes", @@ -417,7 +452,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.

The GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling EnableMetricsCollection.

" + "documentation":"

Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.

The GroupStandbyInstances metric is not returned by default. You must explicitly request this metric when calling the EnableMetricsCollection API.

" }, "DescribeNotificationConfigurations":{ "name":"DescribeNotificationConfigurations", @@ -484,7 +519,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.

" + "documentation":"

Describes the scaling process types for use with the ResumeProcesses and SuspendProcesses APIs.

" }, "DescribeScheduledActions":{ "name":"DescribeScheduledActions", @@ -501,7 +536,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.

" + "documentation":"

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, call the DescribeScalingActivities API.

" }, "DescribeTags":{ "name":"DescribeTags", @@ -518,7 +553,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

" + "documentation":"

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "DescribeTerminationPolicyTypes":{ "name":"DescribeTerminationPolicyTypes", @@ -581,7 +616,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. The instances remain running.

" + "documentation":"

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups API instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.

" }, "DisableMetricsCollection":{ "name":"DisableMetricsCollection", @@ -634,7 +669,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Executes the specified policy.

" + "documentation":"

Executes the specified policy. This can be useful for testing the design of your scaling policy.

" }, "ExitStandby":{ "name":"ExitStandby", @@ -667,7 +702,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

" + "documentation":"

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the RecordLifecycleActionHeartbeat API call.

  5. If you finish before the timeout period ends, complete the lifecycle action using the CompleteLifecycleAction API call.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.

" }, "PutNotificationConfiguration":{ "name":"PutNotificationConfiguration", @@ -699,7 +734,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group automatically, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Creates or updates a scaling policy for an Auto Scaling group.

For more information about using scaling policies to scale your Auto Scaling group, see Target Tracking Scaling Policies and Step and Simple Scaling Policies in the Amazon EC2 Auto Scaling User Guide.

" }, "PutScheduledUpdateGroupAction":{ "name":"PutScheduledUpdateGroupAction", @@ -729,7 +764,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using PutLifecycleHook.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling Lifecycle in the Amazon EC2 Auto Scaling User Guide.

" }, "ResumeProcesses":{ "name":"ResumeProcesses", @@ -755,7 +790,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Sets the size of the specified Auto Scaling group.

For more information about desired capacity, see What Is Amazon EC2 Auto Scaling? in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Sets the size of the specified Auto Scaling group.

If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

For more information, see Manual Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "SetInstanceHealth":{ "name":"SetInstanceHealth", @@ -786,6 +821,24 @@ ], "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

" }, + "StartInstanceRefresh":{ + "name":"StartInstanceRefresh", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInstanceRefreshType"}, + "output":{ + "shape":"StartInstanceRefreshAnswer", + "resultWrapper":"StartInstanceRefreshResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"InstanceRefreshInProgressFault"} + ], + "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status or to describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling Instances Based on an Instance Refresh.

" + }, "SuspendProcesses":{ "name":"SuspendProcesses", "http":{ @@ -797,7 +850,7 @@ {"shape":"ResourceInUseFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

To resume processes that have been suspended, use ResumeProcesses.

For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

To resume processes that have been suspended, call the ResumeProcesses API.

" }, "TerminateInstanceInAutoScalingGroup":{ "name":"TerminateInstanceInAutoScalingGroup", @@ -814,7 +867,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Terminates the specified instance and optionally adjusts the desired group size. This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing Activities in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing Activities in the Amazon EC2 Auto Scaling User Guide.

" }, "UpdateAutoScalingGroup":{ "name":"UpdateAutoScalingGroup", @@ -828,10 +881,23 @@ {"shape":"ResourceContentionFault"}, {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

  • If a scale-in event occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, this sets the group's DesiredCapacity to the new MinSize value.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.

To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.

" + "documentation":"

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

  • If a scale-in activity occurs as a result of a new DesiredCapacity value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.

  • If you specify a new value for MinSize without specifying a value for DesiredCapacity, and the new MinSize is larger than the current size of the group, this sets the group's DesiredCapacity to the new MinSize value.

  • If you specify a new value for MaxSize without specifying a value for DesiredCapacity, and the new MaxSize is smaller than the current size of the group, this sets the group's DesiredCapacity to the new MaxSize value.

To see which parameters have been set, call the DescribeAutoScalingGroups API. To view the scaling policies for an Auto Scaling group, call the DescribePolicies API. If the group has scaling policies, you can update them by calling the PutScalingPolicy API.

" } }, "shapes":{ + "ActiveInstanceRefreshNotFoundFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "documentation":"

The request failed because an active instance refresh for the specified Auto Scaling group was not found.

", + "error":{ + "code":"ActiveInstanceRefreshNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "Activities":{ "type":"list", "member":{"shape":"Activity"} @@ -1076,7 +1142,7 @@ }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.

" + "documentation":"

The duration of the default cooldown period, in seconds.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -1124,7 +1190,7 @@ }, "Status":{ "shape":"XmlStringMaxLen255", - "documentation":"

The current state of the group when DeleteAutoScalingGroup is in progress.

" + "documentation":"

The current state of the group when the DeleteAutoScalingGroup operation is in progress.

" }, "Tags":{ "shape":"TagDescriptionList", @@ -1144,7 +1210,7 @@ }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 604800.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 0.

" } }, "documentation":"

Describes an Auto Scaling group.

" @@ -1346,7 +1412,7 @@ "members":{ "VirtualName":{ "shape":"XmlStringMaxLen255", - "documentation":"

The name of the virtual device (for example, ephemeral0).

" + "documentation":"

The name of the virtual device (for example, ephemeral0).

You can specify either VirtualName or Ebs, but not both.

" }, "DeviceName":{ "shape":"XmlStringMaxLen255", @@ -1354,11 +1420,11 @@ }, "Ebs":{ "shape":"Ebs", - "documentation":"

The information about the Amazon EBS volume.

" + "documentation":"

Parameters used to automatically set up EBS volumes when an instance is launched.

You can specify either VirtualName or Ebs, but not both.

" }, "NoDevice":{ "shape":"NoDevice", - "documentation":"

Suppresses a device mapping.

If this parameter is true for the root device, the instance might fail the EC2 health check. In that case, Amazon EC2 Auto Scaling launches a replacement instance.

" + "documentation":"

Setting this value to true suppresses the specified device included in the block device mapping of the AMI.

If NoDevice is true for the root device, instances might fail the EC2 health check. In that case, Amazon EC2 Auto Scaling launches replacement instances.

If you specify NoDevice, you cannot specify Ebs.

" } }, "documentation":"

Describes a block device mapping.

" @@ -1367,6 +1433,25 @@ "type":"list", "member":{"shape":"BlockDeviceMapping"} }, + "CancelInstanceRefreshAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The instance refresh ID.

" + } + } + }, + "CancelInstanceRefreshType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + } + } + }, "ClassicLinkVPCSecurityGroups":{ "type":"list", "member":{"shape":"XmlStringMaxLen255"} @@ -1421,11 +1506,11 @@ }, "LaunchConfigurationName":{ "shape":"ResourceName", - "documentation":"

The name of the launch configuration.

If you do not specify LaunchConfigurationName, you must specify one of the following parameters: InstanceId, LaunchTemplate, or MixedInstancesPolicy.

" + "documentation":"

The name of the launch configuration to use when an instance is launched. To get the launch configuration name, use the DescribeLaunchConfigurations API operation. New launch configurations can be created with the CreateLaunchConfiguration API.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template to use to launch instances.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

If you do not specify LaunchTemplate, you must specify one of the following parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy.

" + "documentation":"

Parameters used to specify the launch template and version to use when an instance is launched.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

You can alternatively associate a launch template to the Auto Scaling group by using the MixedInstancesPolicy parameter.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" }, "MixedInstancesPolicy":{ "shape":"MixedInstancesPolicy", @@ -1433,7 +1518,7 @@ }, "InstanceId":{ "shape":"XmlStringMaxLen19", - "documentation":"

The ID of the instance used to create a launch configuration for the group.

When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.

For more information, see Create an Auto Scaling Group Using an EC2 Instance in the Amazon EC2 Auto Scaling User Guide.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" + "documentation":"

The ID of the instance used to create a launch configuration for the group. To get the instance ID, use the Amazon EC2 DescribeInstances API operation.

When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a new launch configuration and associates it with the group. This launch configuration derives its attributes from the specified instance, except for the block device mapping.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -1441,15 +1526,15 @@ }, "MaxSize":{ "shape":"AutoScalingGroupMaxSize", - "documentation":"

The maximum size of the group.

" + "documentation":"

The maximum size of the group.

With a mixed instances policy that uses instance weighting, Amazon EC2 Auto Scaling may need to go above MaxSize to meet your capacity requirements. In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more than your maximum instance weight (weights that define how many capacity units each instance contributes to the capacity of the group).

" }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of Amazon EC2 instances that the Auto Scaling group attempts to maintain. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure automatic scaling.

This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group.

" }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -1469,7 +1554,7 @@ }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Conditional: This parameter is required if you are adding an ELB health check.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. During this time, any health check failures for the instance are ignored. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Required if you are adding an ELB health check.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", @@ -1493,7 +1578,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

One or more tags.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches.

Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "ServiceLinkedRoleARN":{ "shape":"ResourceName", @@ -1501,7 +1586,7 @@ }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 604800.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null.

This parameter is optional, but if you specify a value for it, you must specify a value of at least 604,800 seconds (7 days). To clear a previously set value, specify a new value of 0.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 0.

" } } }, @@ -1774,6 +1859,41 @@ } } }, + "DescribeInstanceRefreshesAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshes":{ + "shape":"InstanceRefreshes", + "documentation":"

The instance refreshes for the specified group.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken value when requesting the next set of items. This value is null when there are no more items to return.

" + } + } + }, + "DescribeInstanceRefreshesType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "InstanceRefreshIds":{ + "shape":"InstanceRefreshIds", + "documentation":"

One or more instance refresh IDs.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

" + } + } + }, "DescribeLifecycleHookTypesAnswer":{ "type":"structure", "members":{ @@ -2097,7 +2217,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

One or more of the following metrics. If you omit this parameter, all metrics are disabled.

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

" + "documentation":"

Specifies one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

If you omit this parameter, all metrics are disabled.

" } } }, @@ -2107,11 +2227,11 @@ "members":{ "SnapshotId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The snapshot ID of the volume to use.

Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater than the size of the snapshot.

" + "documentation":"

The snapshot ID of the volume to use.

You must specify either a VolumeSize or a SnapshotId.

" }, "VolumeSize":{ "shape":"BlockDeviceEbsVolumeSize", - "documentation":"

The volume size, in gibibytes (GiB).

This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.

At least one of VolumeSize or SnapshotId is required.

" + "documentation":"

The volume size, in gibibytes (GiB).

This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.

You must specify either a VolumeSize or a SnapshotId. If you specify both SnapshotId and VolumeSize, the volume size must be equal or greater than the size of the snapshot.

" }, "VolumeType":{ "shape":"BlockDeviceEbsVolumeType", @@ -2123,14 +2243,14 @@ }, "Iops":{ "shape":"BlockDeviceEbsIops", - "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume. The maximum ratio of IOPS to volume size (in GiB) is 50:1. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Conditional: This parameter is required when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)

" + "documentation":"

The number of I/O operations per second (IOPS) to provision for the volume. The maximum ratio of IOPS to volume size (in GiB) is 50:1. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Required when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.)

" }, "Encrypted":{ "shape":"BlockDeviceEbsEncrypted", "documentation":"

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.

" } }, - "documentation":"

Describes an Amazon EBS volume. Used in combination with BlockDeviceMapping.

" + "documentation":"

Describes information used to set up an Amazon EBS volume specified in a block device mapping.

" }, "EbsOptimized":{"type":"boolean"}, "EnableMetricsCollectionQuery":{ @@ -2146,7 +2266,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

One or more of the following metrics. If you omit this parameter, all metrics are enabled.

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

" + "documentation":"

Specifies which group-level metrics to start collecting. You can specify one or more of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

The instance weighting feature supports the following additional metrics:

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

If you omit this parameter, all metrics are enabled.

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2159,7 +2279,7 @@ "members":{ "Metric":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

" + "documentation":"

One of the following metrics:

  • GroupMinSize

  • GroupMaxSize

  • GroupDesiredCapacity

  • GroupInServiceInstances

  • GroupPendingInstances

  • GroupStandbyInstances

  • GroupTerminatingInstances

  • GroupTotalInstances

  • GroupInServiceCapacity

  • GroupPendingCapacity

  • GroupStandbyCapacity

  • GroupTerminatingCapacity

  • GroupTotalCapacity

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2217,15 +2337,15 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

This parameter is not supported if the policy type is StepScaling or TargetTrackingScaling.

For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing the policy.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricValue":{ "shape":"MetricScale", - "documentation":"

The metric value to compare to BreachThreshold. This enables you to execute a policy of type StepScaling and determine which step adjustment to use. For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.

If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.

Conditional: This parameter is required if the policy type is StepScaling and not supported otherwise.

" + "documentation":"

The metric value to compare to BreachThreshold. This enables you to execute a policy of type StepScaling and determine which step adjustment to use. For example, if the breach threshold is 50 and you want to use a step adjustment with a lower bound of 0 and an upper bound of 10, you can set the metric value to 59.

If you specify a metric value that doesn't correspond to a step adjustment for the policy, the call returns an error.

Required if the policy type is StepScaling and not supported otherwise.

" }, "BreachThreshold":{ "shape":"MetricScale", - "documentation":"

The breach threshold for the alarm.

Conditional: This parameter is required if the policy type is StepScaling and not supported otherwise.

" + "documentation":"

The breach threshold for the alarm.

Required if the policy type is StepScaling and not supported otherwise.

" } } }, @@ -2280,14 +2400,14 @@ "members":{ "Name":{ "shape":"XmlString", - "documentation":"

The name of the filter. The valid values are: \"auto-scaling-group\", \"key\", \"value\", and \"propagate-at-launch\".

" + "documentation":"

The name of the filter. The valid values are: auto-scaling-group, key, value, and propagate-at-launch.

" }, "Values":{ "shape":"Values", - "documentation":"

The value of the filter.

" + "documentation":"

One or more filter values. Filter values are case-sensitive.

" } }, - "documentation":"

Describes a filter.

" + "documentation":"

Describes a filter that is used to return a more specific list of results when describing tags.

For more information, see Tagging Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

" }, "Filters":{ "type":"list", @@ -2362,6 +2482,76 @@ "documentation":"

Describes whether detailed monitoring is enabled for the Auto Scaling instances.

" }, "InstanceProtected":{"type":"boolean"}, + "InstanceRefresh":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The instance refresh ID.

" + }, + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "Status":{ + "shape":"InstanceRefreshStatus", + "documentation":"

The current status for the instance refresh operation:

  • Pending - The request was created, but the operation has not started.

  • InProgress - The operation is in progress.

  • Successful - The operation completed successfully.

  • Failed - The operation failed to complete. You can troubleshoot using the status reason and the scaling activities.

  • Cancelling - An ongoing operation is being cancelled. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.

  • Cancelled - The operation is cancelled.

" + }, + "StatusReason":{ + "shape":"XmlStringMaxLen1023", + "documentation":"

Provides more details about the current status of the instance refresh.

" + }, + "StartTime":{ + "shape":"TimestampType", + "documentation":"

The date and time at which the instance refresh began.

" + }, + "EndTime":{ + "shape":"TimestampType", + "documentation":"

The date and time at which the instance refresh ended.

" + }, + "PercentageComplete":{ + "shape":"IntPercent", + "documentation":"

The percentage of the instance refresh that is complete. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

" + }, + "InstancesToUpdate":{ + "shape":"InstancesToUpdate", + "documentation":"

The number of instances remaining to update before the instance refresh is complete.

" + } + }, + "documentation":"

Describes an instance refresh for an Auto Scaling group.

" + }, + "InstanceRefreshIds":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "InstanceRefreshInProgressFault":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "documentation":"

The request failed because an active instance refresh operation already exists for the specified Auto Scaling group.

", + "error":{ + "code":"InstanceRefreshInProgress", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "InstanceRefreshStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Successful", + "Failed", + "Cancelling", + "Cancelled" + ] + }, + "InstanceRefreshes":{ + "type":"list", + "member":{"shape":"InstanceRefresh"} + }, "Instances":{ "type":"list", "member":{"shape":"Instance"} @@ -2394,7 +2584,16 @@ "documentation":"

The maximum price per unit hour that you are willing to pay for a Spot Instance. If you leave the value of this parameter blank (which is the default), the maximum Spot price is set at the On-Demand price.

To remove a value that you previously set, include the parameter but leave the value blank.

" } }, - "documentation":"

Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + "documentation":"

Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" + }, + "InstancesToUpdate":{ + "type":"integer", + "min":0 + }, + "IntPercent":{ + "type":"integer", + "max":100, + "min":0 }, "InvalidNextToken":{ "type":"structure", @@ -2558,7 +2757,7 @@ }, "Overrides":{ "shape":"Overrides", - "documentation":"

An optional setting. Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.

" + "documentation":"

Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.

If not provided, Amazon EC2 Auto Scaling will use the instance type specified in the launch template to launch instances.

" } }, "documentation":"

Describes a launch template and overrides.

The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

" @@ -2574,32 +2773,32 @@ "members":{ "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The instance type.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The instance type. You must use an instance type that is supported in your requested Region and Availability Zones.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

" }, "WeightedCapacity":{ "shape":"XmlStringMaxLen32", "documentation":"

The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.

For more information, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 1. Maximum value of 999.

" } }, - "documentation":"

Describes an override for a launch template.

" + "documentation":"

Describes an override for a launch template. Currently, the only supported override is instance type.

The maximum number of instance type overrides that can be associated with an Auto Scaling group is 20.

" }, "LaunchTemplateSpecification":{ "type":"structure", "members":{ "LaunchTemplateId":{ "shape":"XmlStringMaxLen255", - "documentation":"

The ID of the launch template. You must specify either a template ID or a template name.

" + "documentation":"

The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

You must specify either a template ID or a template name.

" }, "LaunchTemplateName":{ "shape":"LaunchTemplateName", - "documentation":"

The name of the launch template. You must specify either a template name or a template ID.

" + "documentation":"

The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.

You must specify either a template ID or a template name.

" }, "Version":{ "shape":"XmlStringMaxLen255", - "documentation":"

The version number, $Latest, or $Default. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" + "documentation":"

The version number, $Latest, or $Default. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API.

If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

" } }, - "documentation":"

Describes a launch template and the launch template version.

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by an Auto Scaling group to configure Amazon EC2 instances.

The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleActionResult":{"type":"string"}, "LifecycleActionToken":{ @@ -2647,7 +2846,7 @@ "documentation":"

Defines the action the Auto Scaling group should take when the lifecycle hook timeout elapses or if an unexpected failure occurs. The possible values are CONTINUE and ABANDON.

" } }, - "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or terminates instances. Used in response to DescribeLifecycleHooks.

" + "documentation":"

Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you want to perform an action whenever it launches instances or terminates instances.

" }, "LifecycleHookNames":{ "type":"list", @@ -2690,7 +2889,7 @@ "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.

" } }, - "documentation":"

Describes a lifecycle hook. Used in combination with CreateAutoScalingGroup.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. You can modify an existing lifecycle hook or create new lifecycle hooks using PutLifecycleHook. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

" + "documentation":"

Describes information used to specify a lifecycle hook for an Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for creating a lifecycle hook for an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "LifecycleHookSpecifications":{ "type":"list", @@ -2727,7 +2926,7 @@ "documentation":"

" } }, - "documentation":"

You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits.

", + "documentation":"

You have already reached a limit for your Amazon EC2 Auto Scaling resources (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). For more information, see DescribeAccountLimits in the Amazon EC2 Auto Scaling API Reference.

", "error":{ "code":"LimitExceeded", "httpStatusCode":400, @@ -2873,14 +3072,14 @@ "members":{ "LaunchTemplate":{ "shape":"LaunchTemplate", - "documentation":"

The launch template and instance types (overrides).

This parameter must be specified when creating a mixed instances policy.

" + "documentation":"

The launch template and instance types (overrides).

Required when creating a mixed instances policy.

" }, "InstancesDistribution":{ "shape":"InstancesDistribution", "documentation":"

The instances distribution to use.

If you leave this parameter unspecified, the value for each parameter in InstancesDistribution uses a default value.

" } }, - "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.

" + "documentation":"

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or launch template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.

" }, "MonitoringEnabled":{"type":"boolean"}, "NoDevice":{"type":"boolean"}, @@ -2966,7 +3165,7 @@ }, "ResourceLabel":{ "shape":"XmlStringMaxLen1023", - "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where

  • app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and

  • targetgroup/target-group-name/target-group-id is the final portion of the target group ARN.

" + "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

Elastic Load Balancing sends data about your load balancers to Amazon CloudWatch. CloudWatch collects the data and specifies the format to use to access the data. The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where

  • app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and

  • targetgroup/target-group-name/target-group-id is the final portion of the target group ARN.

To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.

" } }, "documentation":"

Represents a predefined metric for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.

" @@ -2981,7 +3180,7 @@ "members":{ "ProcessName":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following processes:

  • Launch

  • Terminate

  • AddToLoadBalancer

  • AlarmNotification

  • AZRebalance

  • HealthCheck

  • ReplaceUnhealthy

  • ScheduledActions

" + "documentation":"

One of the following processes:

  • Launch

  • Terminate

  • AddToLoadBalancer

  • AlarmNotification

  • AZRebalance

  • HealthCheck

  • InstanceRefresh

  • ReplaceUnhealthy

  • ScheduledActions

" } }, "documentation":"

Describes a process type.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" @@ -3024,11 +3223,11 @@ }, "LifecycleTransition":{ "shape":"LifecycleTransition", - "documentation":"

The instance state to which you want to attach the lifecycle hook. The valid values are:

  • autoscaling:EC2_INSTANCE_LAUNCHING

  • autoscaling:EC2_INSTANCE_TERMINATING

Conditional: This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

" + "documentation":"

The instance state to which you want to attach the lifecycle hook. The valid values are:

  • autoscaling:EC2_INSTANCE_LAUNCHING

  • autoscaling:EC2_INSTANCE_TERMINATING

Required for new lifecycle hooks, but optional when updating existing hooks.

" }, "RoleARN":{ "shape":"ResourceName", - "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.

Conditional: This parameter is required for new lifecycle hooks, but optional when updating existing hooks.

" + "documentation":"

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.

Required for new lifecycle hooks, but optional when updating existing hooks.

" }, "NotificationTargetARN":{ "shape":"NotificationTargetResourceName", @@ -3040,7 +3239,7 @@ }, "HeartbeatTimeout":{ "shape":"HeartbeatTimeout", - "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default value is 3600 seconds (1 hour).

If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the action that you specified in the DefaultResult parameter. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.

" + "documentation":"

The maximum time, in seconds, that can elapse before the lifecycle hook times out. The range is from 30 to 7200 seconds. The default value is 3600 seconds (1 hour).

If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the action that you specified in the DefaultResult parameter. You can prevent the lifecycle hook from timing out by calling the RecordLifecycleActionHeartbeat API.

" }, "DefaultResult":{ "shape":"LifecycleActionResult", @@ -3066,7 +3265,7 @@ }, "NotificationTypes":{ "shape":"AutoScalingNotificationTypes", - "documentation":"

The type of event that causes the notification to be sent. For more information about notification types supported by Amazon EC2 Auto Scaling, see DescribeAutoScalingNotificationTypes.

" + "documentation":"

The type of event that causes the notification to be sent. To query the notification types supported by Amazon EC2 Auto Scaling, call the DescribeAutoScalingNotificationTypes API.

" } } }, @@ -3087,11 +3286,11 @@ }, "PolicyType":{ "shape":"XmlStringMaxLen64", - "documentation":"

The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. If the policy type is null, the value is treated as SimpleScaling.

" + "documentation":"

One of the following policy types:

  • TargetTrackingScaling

  • StepScaling

  • SimpleScaling (default)

" }, "AdjustmentType":{ "shape":"XmlStringMaxLen255", - "documentation":"

Specifies whether the ScalingAdjustment parameter is an absolute number or a percentage of the current capacity. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Valid only if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Specifies how the scaling adjustment is interpreted (either an absolute number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

Required if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

" }, "MinAdjustmentStep":{ "shape":"MinAdjustmentStep", @@ -3099,15 +3298,15 @@ }, "MinAdjustmentMagnitude":{ "shape":"MinAdjustmentMagnitude", - "documentation":"

The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

This property replaces the MinAdjustmentStep property. For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.

Valid only if the policy type is SimpleScaling or StepScaling.

" + "documentation":"

The minimum value to scale by when the adjustment type is PercentChangeInCapacity. For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.

Valid only if the policy type is StepScaling or SimpleScaling. For more information, see Scaling Adjustment Types in the Amazon EC2 Auto Scaling User Guide.

Some Auto Scaling groups use instance weights. In this case, set the MinAdjustmentMagnitude to a value that is at least as large as your largest instance weight.

" }, "ScalingAdjustment":{ "shape":"PolicyIncrement", - "documentation":"

The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.

Conditional: If you specify SimpleScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

" + "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a positive value.

Required if the policy type is SimpleScaling. (Not used with any other policy type.)

" }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before any further dynamic scaling activities can start. If this parameter is not specified, the default cooldown period for the group applies.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The duration of the policy's cooldown period, in seconds. When a cooldown period is specified here, it overrides the default cooldown period defined for the Auto Scaling group.

Valid only if the policy type is SimpleScaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "MetricAggregationType":{ "shape":"XmlStringMaxLen32", @@ -3115,15 +3314,15 @@ }, "StepAdjustments":{ "shape":"StepAdjustments", - "documentation":"

A set of adjustments that enable you to scale based on the size of the alarm breach.

Conditional: If you specify StepScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

" + "documentation":"

A set of adjustments that enable you to scale based on the size of the alarm breach.

Required if the policy type is StepScaling. (Not used with any other policy type.)

" }, "EstimatedInstanceWarmup":{ "shape":"EstimatedInstanceWarmup", - "documentation":"

The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. The default is to use the value specified for the default cooldown period for the group.

Valid only if the policy type is StepScaling or TargetTrackingScaling.

" + "documentation":"

The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. If not provided, the default is to use the value from the default cooldown period for the Auto Scaling group.

Valid only if the policy type is TargetTrackingScaling or StepScaling.

" }, "TargetTrackingConfiguration":{ "shape":"TargetTrackingConfiguration", - "documentation":"

A target tracking scaling policy. Includes support for predefined or customized metrics.

For more information, see TargetTrackingConfiguration in the Amazon EC2 Auto Scaling API Reference.

Conditional: If you specify TargetTrackingScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

" + "documentation":"

A target tracking scaling policy. Includes support for predefined or customized metrics.

The following predefined metrics are available:

  • ASGAverageCPUUtilization

  • ASGAverageNetworkIn

  • ASGAverageNetworkOut

  • ALBRequestCountPerTarget

If you specify ALBRequestCountPerTarget for the metric, you must specify the ResourceLabel parameter with the PredefinedMetricSpecification.

For more information, see TargetTrackingConfiguration in the Amazon EC2 Auto Scaling API Reference.

Required if the policy type is TargetTrackingScaling.

" }, "Enabled":{ "shape":"ScalingPolicyEnabled", @@ -3164,15 +3363,15 @@ }, "MinSize":{ "shape":"AutoScalingGroupMinSize", - "documentation":"

The minimum number of instances in the Auto Scaling group.

" + "documentation":"

The minimum size of the Auto Scaling group.

" }, "MaxSize":{ "shape":"AutoScalingGroupMaxSize", - "documentation":"

The maximum number of instances in the Auto Scaling group.

" + "documentation":"

The maximum size of the Auto Scaling group.

" }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of EC2 instances that should be running in the Auto Scaling group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain. It can scale beyond this capacity if you add more scaling conditions.

" } } }, @@ -3206,6 +3405,28 @@ } } }, + "RefreshInstanceWarmup":{ + "type":"integer", + "min":0 + }, + "RefreshPreferences":{ + "type":"structure", + "members":{ + "MinHealthyPercentage":{ + "shape":"IntPercent", + "documentation":"

The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group (rounded up to the nearest integer). The default is 90.

" + }, + "InstanceWarmup":{ + "shape":"RefreshInstanceWarmup", + "documentation":"

The number of seconds until a newly launched instance is configured and ready to use. During this time, Amazon EC2 Auto Scaling does not immediately move on to the next replacement. The default is to use the value for the health check grace period defined for the group.

" + } + }, + "documentation":"

Describes information used to start an instance refresh.

" + }, + "RefreshStrategy":{ + "type":"string", + "enum":["Rolling"] + }, "ResourceContentionFault":{ "type":"structure", "members":{ @@ -3298,11 +3519,11 @@ }, "PolicyType":{ "shape":"XmlStringMaxLen64", - "documentation":"

The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling.

" + "documentation":"

One of the following policy types:

  • TargetTrackingScaling

  • StepScaling

  • SimpleScaling (default)

For more information, see Target Tracking Scaling Policies and Step and Simple Scaling Policies in the Amazon EC2 Auto Scaling User Guide.

" }, "AdjustmentType":{ "shape":"XmlStringMaxLen255", - "documentation":"

The adjustment type, which specifies how ScalingAdjustment is interpreted. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

" + "documentation":"

Specifies how the scaling adjustment is interpreted (either an absolute number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

" }, "MinAdjustmentStep":{ "shape":"MinAdjustmentStep", @@ -3310,7 +3531,7 @@ }, "MinAdjustmentMagnitude":{ "shape":"MinAdjustmentMagnitude", - "documentation":"

The minimum number of instances to scale. If the value of AdjustmentType is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity of the Auto Scaling group by at least this many instances. Otherwise, the error is ValidationError.

" + "documentation":"

The minimum value to scale by when the adjustment type is PercentChangeInCapacity.

" }, "ScalingAdjustment":{ "shape":"PolicyIncrement", @@ -3318,7 +3539,7 @@ }, "Cooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before any further dynamic scaling activities can start.

" + "documentation":"

The duration of the policy's cooldown period, in seconds.

" }, "StepAdjustments":{ "shape":"StepAdjustments", @@ -3358,7 +3579,7 @@ }, "ScalingProcesses":{ "shape":"ProcessNames", - "documentation":"

One or more of the following processes. If you omit this parameter, all processes are specified.

  • Launch

  • Terminate

  • HealthCheck

  • ReplaceUnhealthy

  • AZRebalance

  • AlarmNotification

  • ScheduledActions

  • AddToLoadBalancer

" + "documentation":"

One or more of the following processes:

  • Launch

  • Terminate

  • AddToLoadBalancer

  • AlarmNotification

  • AZRebalance

  • HealthCheck

  • InstanceRefresh

  • ReplaceUnhealthy

  • ScheduledActions

If you omit this parameter, all processes are specified.

" } } }, @@ -3412,18 +3633,18 @@ }, "MinSize":{ "shape":"AutoScalingGroupMinSize", - "documentation":"

The minimum number of instances in the Auto Scaling group.

" + "documentation":"

The minimum size of the Auto Scaling group.

" }, "MaxSize":{ "shape":"AutoScalingGroupMaxSize", - "documentation":"

The maximum number of instances in the Auto Scaling group.

" + "documentation":"

The maximum size of the Auto Scaling group.

" }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of instances you prefer to maintain in the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain.

" } }, - "documentation":"

Describes a scheduled scaling action. Used in response to DescribeScheduledActions.

" + "documentation":"

Describes a scheduled scaling action.

" }, "ScheduledUpdateGroupActionRequest":{ "type":"structure", @@ -3447,18 +3668,18 @@ }, "MinSize":{ "shape":"AutoScalingGroupMinSize", - "documentation":"

The minimum number of instances in the Auto Scaling group.

" + "documentation":"

The minimum size of the Auto Scaling group.

" }, "MaxSize":{ "shape":"AutoScalingGroupMaxSize", - "documentation":"

The maximum number of instances in the Auto Scaling group.

" + "documentation":"

The maximum size of the Auto Scaling group.

" }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of EC2 instances that should be running in the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after the scheduled action runs and the capacity it attempts to maintain.

" } }, - "documentation":"

Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

" + "documentation":"

Describes information used for one or more scheduled scaling action updates in a BatchPutScheduledUpdateGroupAction operation.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

" }, "ScheduledUpdateGroupActionRequests":{ "type":"list", @@ -3498,7 +3719,7 @@ }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of EC2 instances that should be running in the Auto Scaling group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after this operation completes and the capacity it attempts to maintain.

" }, "HonorCooldown":{ "shape":"HonorCooldown", @@ -3523,7 +3744,7 @@ }, "ShouldRespectGracePeriod":{ "shape":"ShouldRespectGracePeriod", - "documentation":"

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group.

For more information about the health check grace period, see CreateAutoScalingGroup.

" + "documentation":"

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group.

For more information about the health check grace period, see CreateAutoScalingGroup in the Amazon EC2 Auto Scaling API Reference.

" } } }, @@ -3562,6 +3783,33 @@ "max":255, "min":1 }, + "StartInstanceRefreshAnswer":{ + "type":"structure", + "members":{ + "InstanceRefreshId":{ + "shape":"XmlStringMaxLen255", + "documentation":"

A unique ID for tracking the progress of the request.

" + } + } + }, + "StartInstanceRefreshType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "Strategy":{ + "shape":"RefreshStrategy", + "documentation":"

The strategy to use for the instance refresh. The only valid value is Rolling.

A rolling update is an update that is applied to all instances in an Auto Scaling group until all instances have been updated. A rolling update can fail due to failed health checks or if instances are on standby or are protected from scale in. If the rolling update process fails, any instances that were already replaced are not rolled back to their previous configuration.

" + }, + "Preferences":{ + "shape":"RefreshPreferences", + "documentation":"

Set of preferences associated with the instance refresh request.

If not provided, the default values are used. For MinHealthyPercentage, the default value is 90. For InstanceWarmup, the default is to use the value specified for the health check grace period for the Auto Scaling group.

For more information, see RefreshPreferences in the Amazon EC2 Auto Scaling API Reference.

" + } + } + }, "StepAdjustment":{ "type":"structure", "required":["ScalingAdjustment"], @@ -3579,7 +3827,7 @@ "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

" } }, - "documentation":"

Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm. Used in combination with PutScalingPolicy.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

" + "documentation":"

Describes information used to create a step adjustment for a step scaling policy.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

For more information, see Step Adjustments in the Amazon EC2 Auto Scaling User Guide.

" }, "StepAdjustments":{ "type":"list", @@ -3597,7 +3845,7 @@ "documentation":"

The reason that the process was suspended.

" } }, - "documentation":"

Describes an automatic scaling process that has been suspended. For more information, see ProcessType.

" + "documentation":"

Describes an automatic scaling process that has been suspended.

For more information, see Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendedProcesses":{ "type":"list", @@ -3764,15 +4012,15 @@ }, "MaxSize":{ "shape":"AutoScalingGroupMaxSize", - "documentation":"

The maximum size of the Auto Scaling group.

" + "documentation":"

The maximum size of the Auto Scaling group.

With a mixed instances policy that uses instance weighting, Amazon EC2 Auto Scaling may need to go above MaxSize to meet your capacity requirements. In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more than your maximum instance weight (weights that define how many capacity units each instance contributes to the capacity of the group).

" }, "DesiredCapacity":{ "shape":"AutoScalingGroupDesiredCapacity", - "documentation":"

The number of EC2 instances that should be running in the Auto Scaling group. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

" + "documentation":"

The desired capacity is the initial capacity of the Auto Scaling group after this operation completes and the capacity it attempts to maintain.

This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group.

" }, "DefaultCooldown":{ "shape":"Cooldown", - "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300. This cooldown period is not used when a scaling-specific cooldown is specified.

Cooldown periods are not supported for target tracking scaling policies, step scaling policies, or scheduled scaling. For more information, see Scaling Cooldowns in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. The default value is 300.

This setting applies when using simple scaling policies, but not when using other scaling policies or scheduled scaling. For more information, see Scaling Cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -3784,7 +4032,7 @@ }, "HealthCheckGracePeriod":{ "shape":"HealthCheckGracePeriod", - "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Conditional: This parameter is required if you are adding an ELB health check.

" + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service. The default value is 0.

For more information, see Health Check Grace Period in the Amazon EC2 Auto Scaling User Guide.

Required if you are adding an ELB health check.

" }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", @@ -3808,7 +4056,7 @@ }, "MaxInstanceLifetime":{ "shape":"MaxInstanceLifetime", - "documentation":"

The maximum amount of time, in seconds, that an instance can be in service.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 604800.

" + "documentation":"

The maximum amount of time, in seconds, that an instance can be in service. The default is null.

This parameter is optional, but if you specify a value for it, you must specify a value of at least 604,800 seconds (7 days). To clear a previously set value, specify a new value of 0.

For more information, see Replacing Auto Scaling Instances Based on Maximum Instance Lifetime in the Amazon EC2 Auto Scaling User Guide.

Valid Range: Minimum value of 0.

" } } }, diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 8f7ec6536b4e..3eab2c1da49a 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 5eb4028de6fa..054ba7dca02c 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index a80b024a3f4a..73b18ebb554f 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -228,7 +228,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns information about a saved resource, including the last time it was backed-up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource.

", + "documentation":"

Returns information about a saved resource, including the last time it was backed up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource.

", "idempotent":true }, "DescribeRecoveryPoint":{ @@ -248,6 +248,19 @@ "documentation":"

Returns metadata associated with a recovery point, including ID, status, encryption, and lifecycle.

", "idempotent":true }, + "DescribeRegionSettings":{ + "name":"DescribeRegionSettings", + "http":{ + "method":"GET", + "requestUri":"/account-settings" + }, + "input":{"shape":"DescribeRegionSettingsInput"}, + "output":{"shape":"DescribeRegionSettingsOutput"}, + "errors":[ + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns the current service opt-in settings for the Region. If the service has a value set to true, AWS Backup attempts to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to false for a service, AWS Backup does not attempt to protect that service's resources in this Region.

" + }, "DescribeRestoreJob":{ "name":"DescribeRestoreJob", "http":{ @@ -422,7 +435,6 @@ "output":{"shape":"ListBackupJobsOutput"}, "errors":[ {"shape":"InvalidParameterValueException"}, - {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

Returns metadata about your backup jobs.

", @@ -606,7 +618,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of key-value pairs assigned to a target recovery point, backup plan, or backup vault.

", + "documentation":"

Returns a list of key-value pairs assigned to a target recovery point, backup plan, or backup vault.

ListTags are currently only supported with Amazon EFS backups.

", "idempotent":true }, "PutBackupVaultAccessPolicy":{ @@ -776,10 +788,28 @@ ], "documentation":"

Sets the transition lifecycle of a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

", "idempotent":true + }, + "UpdateRegionSettings":{ + "name":"UpdateRegionSettings", + "http":{ + "method":"PUT", + "requestUri":"/account-settings" + }, + "input":{"shape":"UpdateRegionSettingsInput"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Updates the current service opt-in settings for the Region. If the service has a value set to true, AWS Backup attempts to protect that service's resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to false for a service, AWS Backup does not attempt to protect that service's resources in this Region.

" } }, "shapes":{ "ARN":{"type":"string"}, + "AccountId":{ + "type":"string", + "pattern":"^[0-9]{12}$" + }, "AlreadyExistsException":{ "type":"structure", "members":{ @@ -808,6 +838,10 @@ "BackupJob":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID that owns the backup job.

" + }, "BackupJobId":{ "shape":"string", "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

" @@ -870,7 +904,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of AWS resource to be backed-up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + "documentation":"

The type of AWS resource to be backed up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" }, "BytesTransferred":{ "shape":"Long", @@ -909,7 +943,7 @@ }, "Rules":{ "shape":"BackupRules", - "documentation":"

An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.

" + "documentation":"

An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.

" } }, "documentation":"

Contains an optional backup plan display name and an array of BackupRule objects, each of which specifies a backup rule. Each rule in a backup plan is a separate scheduled task and can back up a different selection of AWS resources.

" @@ -923,14 +957,14 @@ "members":{ "BackupPlanName":{ "shape":"BackupPlanName", - "documentation":"

The display name of a backup plan.

" + "documentation":"

The optional display name of a backup plan.

" }, "Rules":{ "shape":"BackupRulesInput", "documentation":"

An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.

" } }, - "documentation":"

Contains an optional backup plan display name and an array of BackupRule objects, each of which specifies a backup rule. Each rule in a backup plan is a separate scheduled task and can back up a different selection of AWS resources.

" + "documentation":"

Contains an optional backup plan display name and an array of BackupRule objects, each of which specifies a backup rule. Each rule in a backup plan is a separate scheduled task and can back up a different selection of AWS resources.

" }, "BackupPlanName":{"type":"string"}, "BackupPlanTemplatesList":{ @@ -1018,11 +1052,11 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

An optional value that specifies a period of time in minutes after a backup is scheduled before a job is canceled if it doesn't start successfully.

" + "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional.

" }, "CompletionWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

A value in minutes after a backup job is successfully started before it must be completed or it is canceled by AWS Backup. This value is optional.

" + "documentation":"

A value in minutes after a backup job is successfully started before it must be completed or it will be canceled by AWS Backup. This value is optional.

" }, "Lifecycle":{ "shape":"Lifecycle", @@ -1052,7 +1086,7 @@ "members":{ "RuleName":{ "shape":"BackupRuleName", - "documentation":"

>An optional display name for a backup rule.

" + "documentation":"

An optional display name for a backup rule.

" }, "TargetBackupVaultName":{ "shape":"BackupVaultName", @@ -1064,15 +1098,15 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

The amount of time in minutes before beginning a backup.

" + "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional.

" }, "CompletionWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

The amount of time AWS Backup attempts a backup before canceling the job and returning an error.

" + "documentation":"

A value in minutes after a backup job is successfully started before it must be completed or it will be canceled by AWS Backup. This value is optional.

" }, "Lifecycle":{ "shape":"Lifecycle", - "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days”. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" }, "RecoveryPointTags":{ "shape":"Tags", @@ -1236,7 +1270,7 @@ "documentation":"

A timestamp that specifies when to delete a recovery point.

" } }, - "documentation":"

Contains DeleteAt and MoveToColdStorageAt timestamps, which are used to specify a lifecycle for a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + "documentation":"

Contains DeleteAt and MoveToColdStorageAt timestamps, which are used to specify a lifecycle for a recovery point.

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" }, "Condition":{ "type":"structure", @@ -1274,7 +1308,7 @@ "Lifecycle":{"shape":"Lifecycle"}, "DestinationBackupVaultArn":{ "shape":"ARN", - "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. For example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. For example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" } }, "documentation":"

The details of the copy operation.

" @@ -1286,45 +1320,49 @@ "CopyJob":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID that owns the copy job.

" + }, "CopyJobId":{ "shape":"string", - "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

" + "documentation":"

Uniquely identifies a copy job.

" }, "SourceBackupVaultArn":{ "shape":"ARN", - "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a source copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a source copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" }, "SourceRecoveryPointArn":{ "shape":"ARN", - "documentation":"

An ARN that uniquely identifies a source recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + "documentation":"

An ARN that uniquely identifies a source recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" }, "DestinationBackupVaultArn":{ "shape":"ARN", - "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a destination copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a destination copy vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" }, "DestinationRecoveryPointArn":{ "shape":"ARN", - "documentation":"

An ARN that uniquely identifies a destination recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + "documentation":"

An ARN that uniquely identifies a destination recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" }, "ResourceArn":{ "shape":"ARN", - "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + "documentation":"

The AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" }, "CreationDate":{ "shape":"timestamp", - "documentation":"

The date and time a copy job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + "documentation":"

The date and time a copy job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "CompletionDate":{ "shape":"timestamp", - "documentation":"

The date and time a job to create a copy job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + "documentation":"

The date and time a copy job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "State":{ "shape":"CopyJobState", - "documentation":"

The current state of a resource recovery point.

" + "documentation":"

The current state of a copy job.

" }, "StatusMessage":{ "shape":"string", - "documentation":"

A detailed message explaining the status of the job that to copy a resource.

" + "documentation":"

A detailed message explaining the status of the job to copy a resource.

" }, "BackupSizeInBytes":{ "shape":"Long", @@ -1332,12 +1370,12 @@ }, "IamRoleArn":{ "shape":"IAMRoleArn", - "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" }, "CreatedBy":{"shape":"RecoveryPointCreator"}, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + "documentation":"

The type of AWS resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" } }, "documentation":"

Contains detailed information about a copy job.

" @@ -1390,7 +1428,7 @@ }, "VersionId":{ "shape":"string", - "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1024 bytes long. They cannot be edited.

" + "documentation":"

Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most 1,024 bytes long. They cannot be edited.

" } } }, @@ -1501,7 +1539,7 @@ }, "DeletionDate":{ "shape":"timestamp", - "documentation":"

The date and time a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + "documentation":"

The date and time a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "VersionId":{ "shape":"string", @@ -1548,7 +1586,7 @@ "members":{ "BackupVaultName":{ "shape":"string", - "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and theAWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", "location":"uri", "locationName":"backupVaultName" } @@ -1620,6 +1658,10 @@ "DescribeBackupJobOutput":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Returns the account ID that owns the backup job.

" + }, "BackupJobId":{ "shape":"string", "documentation":"

Uniquely identifies a request to AWS Backup to back up a resource.

" @@ -1646,7 +1688,7 @@ }, "CompletionDate":{ "shape":"timestamp", - "documentation":"

The date and time that a job to create a backup job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + "documentation":"

The date and time that a job to create a backup job is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "State":{ "shape":"BackupJobState", @@ -1674,7 +1716,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of AWS resource to be backed-up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" + "documentation":"

The type of AWS resource to be backed up; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" }, "BytesTransferred":{ "shape":"Long", @@ -1737,7 +1779,7 @@ "members":{ "CopyJobId":{ "shape":"string", - "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

", + "documentation":"

Uniquely identifies a copy job.

", "location":"uri", "locationName":"copyJobId" } @@ -1875,6 +1917,20 @@ } } }, + "DescribeRegionSettingsInput":{ + "type":"structure", + "members":{ + } + }, + "DescribeRegionSettingsOutput":{ + "type":"structure", + "members":{ + "ResourceTypeOptInPreference":{ + "shape":"ResourceTypeOptInPreference", + "documentation":"

Returns a list of all services along with the opt-in preferences in the Region.

" + } + } + }, "DescribeRestoreJobInput":{ "type":"structure", "required":["RestoreJobId"], @@ -1890,6 +1946,10 @@ "DescribeRestoreJobOutput":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Returns the account ID that owns the restore job.

" + }, "RestoreJobId":{ "shape":"string", "documentation":"

Uniquely identifies the job that restores a recovery point.

" @@ -1912,7 +1972,7 @@ }, "StatusMessage":{ "shape":"string", - "documentation":"

A detailed message explaining the status of a job to restore a recovery point.

" + "documentation":"

A message showing the status of a job to restore a recovery point.

" }, "PercentDone":{ "shape":"string", @@ -1933,6 +1993,10 @@ "CreatedResourceArn":{ "shape":"ARN", "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource whose recovery point is being restored. The format of the ARN depends on the resource type of the backed-up resource.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

Returns metadata associated with a restore job listed by resource type.

" } } }, @@ -2044,7 +2108,7 @@ }, "DeletionDate":{ "shape":"timestamp", - "documentation":"

The date and time that a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + "documentation":"

The date and time that a backup plan is deleted, in Unix format and Coordinated Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" }, "LastExecutionDate":{ "shape":"timestamp", @@ -2203,7 +2267,7 @@ "members":{ "ResourceTypes":{ "shape":"ResourceTypes", - "documentation":"

Contains a string with the supported AWS resource types:

  • EBS for Amazon Elastic Block Store

  • Storage Gateway for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" + "documentation":"

Contains a string with the supported AWS resource types:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EC2 for Amazon Elastic Compute Cloud

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

" } } }, @@ -2243,6 +2307,7 @@ "documentation":"

Indicates that something is wrong with the input to the request. For example, a parameter is of the wrong type.

", "exception":true }, + "IsEnabled":{"type":"boolean"}, "Lifecycle":{ "type":"structure", "members":{ @@ -2255,7 +2320,7 @@ "documentation":"

Specifies the number of days after creation that a recovery point is deleted. Must be greater than 90 days plus MoveToColdStorageAfterDays.

" } }, - "documentation":"

Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, on the console, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" + "documentation":"

Contains an array of Transition objects specifying how long in days before a recovery point transitions to cold storage or is deleted.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, on the console, the “expire after days” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

" }, "LimitExceededException":{ "type":"structure", @@ -2321,9 +2386,15 @@ }, "ByResourceType":{ "shape":"ResourceType", - "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", + "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EC2 for Amazon Elastic Compute Cloud

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", "location":"querystring", "locationName":"resourceType" + }, + "ByAccountId":{ + "shape":"AccountId", + "documentation":"

The account ID to list the jobs from. Returns only backup jobs associated with the specified account ID.

", + "location":"querystring", + "locationName":"accountId" } } }, @@ -2551,15 +2622,21 @@ }, "ByResourceType":{ "shape":"ResourceType", - "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", + "documentation":"

Returns only backup jobs for the specified resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EC2 for Amazon Elastic Compute Cloud

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

", "location":"querystring", "locationName":"resourceType" }, "ByDestinationVaultArn":{ "shape":"string", - "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a source backup vault to copy from; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

", + "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a destination backup vault to copy to; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

", "location":"querystring", "locationName":"destinationVaultArn" + }, + "ByAccountId":{ + "shape":"AccountId", + "documentation":"

The account ID to list the jobs from. Returns only copy jobs associated with the specified account ID.

", + "location":"querystring", + "locationName":"accountId" } } }, @@ -2728,6 +2805,30 @@ "documentation":"

The maximum number of items to be returned.

", "location":"querystring", "locationName":"maxResults" + }, + "ByAccountId":{ + "shape":"AccountId", + "documentation":"

The account ID to list the jobs from. Returns only restore jobs associated with the specified account ID.

", + "location":"querystring", + "locationName":"accountId" + }, + "ByCreatedBefore":{ + "shape":"timestamp", + "documentation":"

Returns only restore jobs that were created before the specified date.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "ByCreatedAfter":{ + "shape":"timestamp", + "documentation":"

Returns only restore jobs that were created after the specified date.

", + "location":"querystring", + "locationName":"createdAfter" + }, + "ByStatus":{ + "shape":"RestoreJobStatus", + "documentation":"

Returns only restore jobs associated with the specified job status.

", + "location":"querystring", + "locationName":"status" } } }, @@ -3038,6 +3139,11 @@ "type":"string", "pattern":"^[a-zA-Z0-9\\-\\_\\.]{1,50}$" }, + "ResourceTypeOptInPreference":{ + "type":"map", + "key":{"shape":"ResourceType"}, + "value":{"shape":"IsEnabled"} + }, "ResourceTypes":{ "type":"list", "member":{"shape":"ResourceType"} @@ -3060,6 +3166,10 @@ "RestoreJobsListMember":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID that owns the restore job.

" + }, "RestoreJobId":{ "shape":"string", "documentation":"

Uniquely identifies the job that restores a recovery point.

" @@ -3103,6 +3213,10 @@ "CreatedResourceArn":{ "shape":"ARN", "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type of the listed restore jobs; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.

" } }, "documentation":"

Contains metadata about a restore job.

" @@ -3151,11 +3265,11 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

The amount of time in minutes before beginning a backup.

" + "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional.

" }, "CompleteWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

The amount of time AWS Backup attempts a backup before canceling the job and returning an error.

" + "documentation":"

A value in minutes after a backup job is successfully started before it must be completed or it will be canceled by AWS Backup. This value is optional.

" }, "Lifecycle":{ "shape":"Lifecycle", @@ -3199,7 +3313,7 @@ }, "SourceBackupVaultName":{ "shape":"BackupVaultName", - "documentation":"

The name of a logical source container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens. >

" + "documentation":"

The name of a logical source container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" }, "DestinationBackupVaultArn":{ "shape":"ARN", @@ -3207,7 +3321,7 @@ }, "IamRoleArn":{ "shape":"IAMRoleArn", - "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" + "documentation":"

Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access.

" }, "IdempotencyToken":{ "shape":"string", @@ -3221,11 +3335,11 @@ "members":{ "CopyJobId":{ "shape":"string", - "documentation":"

Uniquely identifies a request to AWS Backup to copy a resource.

" + "documentation":"

Uniquely identifies a copy job.

" }, "CreationDate":{ "shape":"timestamp", - "documentation":"

The date and time that a backup job is started, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. >

" + "documentation":"

The date and time that a copy job is started, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" } } }, @@ -3243,7 +3357,7 @@ }, "Metadata":{ "shape":"Metadata", - "documentation":"

A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point.

You can get configuration metadata about a resource at the time it was backed-up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists.

You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance:

  • file-system-id: ID of the Amazon EFS file system that is backed up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.

  • Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true.

  • KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored file system.

  • PerformanceMode: Specifies the throughput mode of the file system.

  • CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request.

  • newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system.

" + "documentation":"

A set of metadata key-value pairs. Contains information, such as a resource name, required to restore a recovery point.

You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the original already exists.

You need to specify specific metadata to restore an Amazon Elastic File System (Amazon EFS) instance:

  • file-system-id: ID of the Amazon EFS file system that is backed up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata.

  • Encrypted: A Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted must be set to true.

  • KmsKeyId: Specifies the AWS KMS key that is used to encrypt the restored file system.

  • PerformanceMode: Specifies the throughput mode of the file system.

  • CreationToken: A user-supplied value that ensures the uniqueness (idempotency) of the request.

  • newFileSystem: A Boolean value that, if true, specifies that the recovery point is restored to a new Amazon EFS file system.

" }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -3255,7 +3369,7 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

Starts a job to restore a recovery point for one of the following resources:

  • EBS for Amazon Elastic Block Store

  • Storage Gateway for AWS Storage Gateway

  • RDS for Amazon Relational Database Service

  • DDB for Amazon DynamoDB

  • EFS for Amazon Elastic File System

" + "documentation":"

Starts a job to restore a recovery point for one of the following resources:

  • DynamoDB for Amazon DynamoDB

  • EBS for Amazon Elastic Block Store

  • EC2 for Amazon Elastic Compute Cloud

  • EFS for Amazon Elastic File System

  • RDS for Amazon Relational Database Service

  • Storage Gateway for AWS Storage Gateway

" } } }, @@ -3425,6 +3539,15 @@ } } }, + "UpdateRegionSettingsInput":{ + "type":"structure", + "members":{ + "ResourceTypeOptInPreference":{ + "shape":"ResourceTypeOptInPreference", + "documentation":"

Updates the list of services along with the opt-in preferences for the region.

" + } + } + }, "WindowMinutes":{"type":"long"}, "boolean":{"type":"boolean"}, "long":{"type":"long"}, diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 4ac15120fc23..b2063d09e972 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 3b5d2bbc41df..5b13dd43912a 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index b84aa0425914..a9e27790ba5b 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/paginators-1.json b/services/chime/src/main/resources/codegen-resources/paginators-1.json index 7d55169a0377..6727698813d8 100644 --- a/services/chime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/chime/src/main/resources/codegen-resources/paginators-1.json @@ -30,6 +30,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListProxySessions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListRoomMemberships": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/chime/src/main/resources/codegen-resources/service-2.json b/services/chime/src/main/resources/codegen-resources/service-2.json index 730d251ce52b..928027674434 100644 --- a/services/chime/src/main/resources/codegen-resources/service-2.json +++ b/services/chime/src/main/resources/codegen-resources/service-2.json @@ -314,7 +314,27 @@ {"shape":"ServiceUnavailableException"}, 
{"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime Developer Guide. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" + }, + "CreateMeetingWithAttendees":{ + "name":"CreateMeetingWithAttendees", + "http":{ + "method":"POST", + "requestUri":"/meetings?operation=create-attendees", + "responseCode":201 + }, + "input":{"shape":"CreateMeetingWithAttendeesRequest"}, + "output":{"shape":"CreateMeetingWithAttendeesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a new Amazon Chime SDK meeting in the specified media Region, with attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime Developer Guide. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

" }, "CreatePhoneNumberOrder":{ "name":"CreatePhoneNumberOrder", @@ -337,6 +357,26 @@ ], "documentation":"

Creates an order for phone numbers to be provisioned. Choose from Amazon Chime Business Calling and Amazon Chime Voice Connector product types. For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

" }, + "CreateProxySession":{ + "name":"CreateProxySession", + "http":{ + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions", + "responseCode":201 + }, + "input":{"shape":"CreateProxySessionRequest"}, + "output":{"shape":"CreateProxySessionResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Creates a proxy session on the specified Amazon Chime Voice Connector for the specified participant phone numbers.

" + }, "CreateRoom":{ "name":"CreateRoom", "http":{ @@ -539,6 +579,25 @@ ], "documentation":"

Moves the specified phone number into the Deletion queue. A phone number must be disassociated from any users or Amazon Chime Voice Connectors before it can be deleted.

Deleted phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.

" }, + "DeleteProxySession":{ + "name":"DeleteProxySession", + "http":{ + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "responseCode":204 + }, + "input":{"shape":"DeleteProxySessionRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes the specified proxy session from the specified Amazon Chime Voice Connector.

" + }, "DeleteRoom":{ "name":"DeleteRoom", "http":{ @@ -597,6 +656,25 @@ ], "documentation":"

Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the Amazon Chime Voice Connector must be disassociated from it before it can be deleted.

" }, + "DeleteVoiceConnectorEmergencyCallingConfiguration":{ + "name":"DeleteVoiceConnectorEmergencyCallingConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteVoiceConnectorEmergencyCallingConfigurationRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes the emergency calling configuration details from the specified Amazon Chime Voice Connector.

" + }, "DeleteVoiceConnectorGroup":{ "name":"DeleteVoiceConnectorGroup", "http":{ @@ -634,7 +712,26 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the origination settings for the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes the origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to deleting the origination settings.

" + }, + "DeleteVoiceConnectorProxy":{ + "name":"DeleteVoiceConnectorProxy", + "http":{ + "method":"DELETE", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "responseCode":204 + }, + "input":{"shape":"DeleteVoiceConnectorProxyRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Deletes the proxy configuration from the specified Amazon Chime Voice Connector.

" }, "DeleteVoiceConnectorStreamingConfiguration":{ "name":"DeleteVoiceConnectorStreamingConfiguration", @@ -672,7 +769,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the termination settings for the specified Amazon Chime Voice Connector.

" + "documentation":"

Deletes the termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to deleting the termination settings.

" }, "DeleteVoiceConnectorTerminationCredentials":{ "name":"DeleteVoiceConnectorTerminationCredentials", @@ -966,6 +1063,45 @@ ], "documentation":"

Retrieves the phone number settings for the administrator's AWS account, such as the default outbound calling name.

" }, + "GetProxySession":{ + "name":"GetProxySession", + "http":{ + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "responseCode":200 + }, + "input":{"shape":"GetProxySessionRequest"}, + "output":{"shape":"GetProxySessionResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the specified proxy session details for the specified Amazon Chime Voice Connector.

" + }, + "GetRetentionSettings":{ + "name":"GetRetentionSettings", + "http":{ + "method":"GET", + "requestUri":"/accounts/{accountId}/retention-settings" + }, + "input":{"shape":"GetRetentionSettingsRequest"}, + "output":{"shape":"GetRetentionSettingsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the retention settings for the specified Amazon Chime Enterprise account. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.

" + }, "GetRoom":{ "name":"GetRoom", "http":{ @@ -1046,6 +1182,26 @@ ], "documentation":"

Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps, name, outbound host, and encryption requirements.

" }, + "GetVoiceConnectorEmergencyCallingConfiguration":{ + "name":"GetVoiceConnectorEmergencyCallingConfiguration", + "http":{ + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "responseCode":200 + }, + "input":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationRequest"}, + "output":{"shape":"GetVoiceConnectorEmergencyCallingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the emergency calling configuration details for the specified Amazon Chime Voice Connector.

" + }, "GetVoiceConnectorGroup":{ "name":"GetVoiceConnectorGroup", "http":{ @@ -1106,6 +1262,26 @@ ], "documentation":"

Retrieves origination setting details for the specified Amazon Chime Voice Connector.

" }, + "GetVoiceConnectorProxy":{ + "name":"GetVoiceConnectorProxy", + "http":{ + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy", + "responseCode":200 + }, + "input":{"shape":"GetVoiceConnectorProxyRequest"}, + "output":{"shape":"GetVoiceConnectorProxyResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Gets the proxy configuration details for the specified Amazon Chime Voice Connector.

" + }, "GetVoiceConnectorStreamingConfiguration":{ "name":"GetVoiceConnectorStreamingConfiguration", "http":{ @@ -1205,6 +1381,26 @@ ], "documentation":"

Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.

" }, + "ListAttendeeTags":{ + "name":"ListAttendeeTags", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags", + "responseCode":200 + }, + "input":{"shape":"ListAttendeeTagsRequest"}, + "output":{"shape":"ListAttendeeTagsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags applied to an Amazon Chime SDK attendee resource.

" + }, "ListAttendees":{ "name":"ListAttendees", "http":{ @@ -1245,6 +1441,26 @@ ], "documentation":"

Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.

" }, + "ListMeetingTags":{ + "name":"ListMeetingTags", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}/tags", + "responseCode":200 + }, + "input":{"shape":"ListMeetingTagsRequest"}, + "output":{"shape":"ListMeetingTagsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" + }, "ListMeetings":{ "name":"ListMeetings", "http":{ @@ -1301,6 +1517,26 @@ ], "documentation":"

Lists the phone numbers for the specified Amazon Chime account, Amazon Chime user, Amazon Chime Voice Connector, or Amazon Chime Voice Connector group.

" }, + "ListProxySessions":{ + "name":"ListProxySessions", + "http":{ + "method":"GET", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions", + "responseCode":200 + }, + "input":{"shape":"ListProxySessionsRequest"}, + "output":{"shape":"ListProxySessionsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the proxy sessions for the specified Amazon Chime Voice Connector.

" + }, "ListRoomMemberships":{ "name":"ListRoomMemberships", "http":{ @@ -1341,6 +1577,24 @@ ], "documentation":"

Lists the room details for the specified Amazon Chime Enterprise account. Optionally, filter the results by a member ID (user ID or bot ID) to see a list of rooms that the member belongs to.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Lists the tags applied to an Amazon Chime SDK meeting resource.

" + }, "ListUsers":{ "name":"ListUsers", "http":{ @@ -1459,6 +1713,47 @@ ], "documentation":"

Creates an events configuration that allows a bot to receive outgoing events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda function ARN. For more information, see Bot.

" }, + "PutRetentionSettings":{ + "name":"PutRetentionSettings", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{accountId}/retention-settings", + "responseCode":204 + }, + "input":{"shape":"PutRetentionSettingsRequest"}, + "output":{"shape":"PutRetentionSettingsResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Puts retention settings for the specified Amazon Chime Enterprise account. We recommend using AWS CloudTrail to monitor usage of this API for your account. For more information, see Logging Amazon Chime API Calls with AWS CloudTrail in the Amazon Chime Administration Guide.

To turn off existing retention settings, remove the number of days from the corresponding RetentionDays field in the RetentionSettings object. For more information about retention settings, see Managing Chat Retention Policies in the Amazon Chime Administration Guide.

" + }, + "PutVoiceConnectorEmergencyCallingConfiguration":{ + "name":"PutVoiceConnectorEmergencyCallingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/emergency-calling-configuration", + "responseCode":200 + }, + "input":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationRequest"}, + "output":{"shape":"PutVoiceConnectorEmergencyCallingConfigurationResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Puts emergency calling configuration details to the specified Amazon Chime Voice Connector, such as emergency phone numbers and calling countries. Origination and termination settings must be enabled for the Amazon Chime Voice Connector before emergency calling can be configured.

" + }, "PutVoiceConnectorLoggingConfiguration":{ "name":"PutVoiceConnectorLoggingConfiguration", "http":{ @@ -1497,7 +1792,27 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds origination settings for the specified Amazon Chime Voice Connector.

" + "documentation":"

Adds origination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off origination settings.

" + }, + "PutVoiceConnectorProxy":{ + "name":"PutVoiceConnectorProxy", + "http":{ + "method":"PUT", + "requestUri":"/voice-connectors/{voiceConnectorId}/programmable-numbers/proxy" + }, + "input":{"shape":"PutVoiceConnectorProxyRequest"}, + "output":{"shape":"PutVoiceConnectorProxyResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Puts the specified proxy configuration to the specified Amazon Chime Voice Connector.

" }, "PutVoiceConnectorStreamingConfiguration":{ "name":"PutVoiceConnectorStreamingConfiguration", @@ -1538,7 +1853,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds termination settings for the specified Amazon Chime Voice Connector.

" + "documentation":"

Adds termination settings for the specified Amazon Chime Voice Connector.

If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to turning off termination settings.

" }, "PutVoiceConnectorTerminationCredentials":{ "name":"PutVoiceConnectorTerminationCredentials", @@ -1559,6 +1874,46 @@ ], "documentation":"

Adds termination SIP credentials for the specified Amazon Chime Voice Connector.

" }, + "RedactConversationMessage":{ + "name":"RedactConversationMessage", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/conversations/{conversationId}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactConversationMessageRequest"}, + "output":{"shape":"RedactConversationMessageResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"BadRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts the specified message from the specified Amazon Chime conversation.

" + }, + "RedactRoomMessage":{ + "name":"RedactRoomMessage", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/messages/{messageId}?operation=redact", + "responseCode":200 + }, + "input":{"shape":"RedactRoomMessageRequest"}, + "output":{"shape":"RedactRoomMessageResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ThrottledClientException"}, + {"shape":"BadRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Redacts the specified message from the specified Amazon Chime chat room.

" + }, "RegenerateSecurityToken":{ "name":"RegenerateSecurityToken", "http":{ @@ -1639,14 +1994,128 @@ ], "documentation":"

Searches phone numbers that can be ordered.

" }, - "UpdateAccount":{ - "name":"UpdateAccount", + "TagAttendee":{ + "name":"TagAttendee", "http":{ "method":"POST", - "requestUri":"/accounts/{accountId}", - "responseCode":200 + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=add", + "responseCode":204 }, - "input":{"shape":"UpdateAccountRequest"}, + "input":{"shape":"TagAttendeeRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK attendee.

" + }, + "TagMeeting":{ + "name":"TagMeeting", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/tags?operation=add", + "responseCode":204 + }, + "input":{"shape":"TagMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags?operation=tag-resource", + "responseCode":204 + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Applies the specified tags to the specified Amazon Chime SDK meeting resource.

" + }, + "UntagAttendee":{ + "name":"UntagAttendee", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}/tags?operation=delete", + "responseCode":204 + }, + "input":{"shape":"UntagAttendeeRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK attendee.

" + }, + "UntagMeeting":{ + "name":"UntagMeeting", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/tags?operation=delete", + "responseCode":204 + }, + "input":{"shape":"UntagMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/tags?operation=untag-resource", + "responseCode":204 + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Chime SDK meeting resource.

" + }, + "UpdateAccount":{ + "name":"UpdateAccount", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountRequest"}, "output":{"shape":"UpdateAccountResponse"}, "errors":[ {"shape":"UnauthorizedClientException"}, @@ -1756,6 +2225,26 @@ ], "documentation":"

Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to update.

" }, + "UpdateProxySession":{ + "name":"UpdateProxySession", + "http":{ + "method":"POST", + "requestUri":"/voice-connectors/{voiceConnectorId}/proxy-sessions/{proxySessionId}", + "responseCode":201 + }, + "input":{"shape":"UpdateProxySessionRequest"}, + "output":{"shape":"UpdateProxySessionResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"ForbiddenException"}, + {"shape":"BadRequestException"}, + {"shape":"ThrottledClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ], + "documentation":"

Updates the specified proxy session details, such as voice or SMS capabilities.

" + }, "UpdateRoom":{ "name":"UpdateRoom", "http":{ @@ -1978,6 +2467,14 @@ }, "documentation":"

The Alexa for Business metadata associated with an Amazon Chime user, used to integrate Alexa for Business with a device.

" }, + "Alpha2CountryCode":{ + "type":"string", + "pattern":"[A-Z]{2}" + }, + "AreaCode":{ + "type":"string", + "pattern":"^$|^[0-9]{3,3}$" + }, "Arn":{ "type":"string", "max":1024, @@ -2018,7 +2515,10 @@ }, "AssociatePhoneNumbersWithVoiceConnectorGroupRequest":{ "type":"structure", - "required":["VoiceConnectorGroupId"], + "required":[ + "VoiceConnectorGroupId", + "E164PhoneNumbers" + ], "members":{ "VoiceConnectorGroupId":{ "shape":"NonEmptyString", @@ -2047,7 +2547,10 @@ }, "AssociatePhoneNumbersWithVoiceConnectorRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "VoiceConnectorId", + "E164PhoneNumbers" + ], "members":{ "VoiceConnectorId":{ "shape":"NonEmptyString", @@ -2120,6 +2623,18 @@ "type":"list", "member":{"shape":"Attendee"} }, + "AttendeeTagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":10, + "min":1 + }, + "AttendeeTagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":10, + "min":1 + }, "BadRequestException":{ "type":"structure", "members":{ @@ -2404,6 +2919,17 @@ "type":"list", "member":{"shape":"CallingRegion"} }, + "Capability":{ + "type":"string", + "enum":[ + "Voice", + "SMS" + ] + }, + "CapabilityList":{ + "type":"list", + "member":{"shape":"Capability"} + }, "ClientRequestToken":{ "type":"string", "max":64, @@ -2421,6 +2947,26 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ConversationRetentionSettings":{ + "type":"structure", + "members":{ + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days for which to retain chat conversation messages.

" + } + }, + "documentation":"

The retention settings that determine how long to retain chat conversation messages for an Amazon Chime Enterprise account.

" + }, + "Country":{ + "type":"string", + "pattern":"^$|^[A-Z]{2,2}$" + }, + "CountryList":{ + "type":"list", + "member":{"shape":"Country"}, + "max":100, + "min":1 + }, "CpsLimit":{ "type":"integer", "min":1 @@ -2478,6 +3024,10 @@ "ExternalUserId":{ "shape":"ExternalUserIdType", "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" + }, + "Tags":{ + "shape":"AttendeeTagList", + "documentation":"

The tag key-value pairs.

" } } }, @@ -2488,6 +3038,10 @@ "ExternalUserId":{ "shape":"ExternalUserIdType", "documentation":"

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

" + }, + "Tags":{ + "shape":"AttendeeTagList", + "documentation":"

The tag key-value pairs.

" } }, "documentation":"

The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee action.

" @@ -2546,13 +3100,21 @@ "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", "idempotencyToken":true }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, "MeetingHostId":{ "shape":"ExternalUserIdType", "documentation":"

Reserved.

" }, "MediaRegion":{ "shape":"String", - "documentation":"

The Region in which to create the meeting. Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" }, "NotificationsConfiguration":{ "shape":"MeetingNotificationConfiguration", @@ -2569,6 +3131,58 @@ } } }, + "CreateMeetingWithAttendeesRequest":{ + "type":"structure", + "required":["ClientRequestToken"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The unique identifier for the client request. Use a different token for different meetings.

", + "idempotencyToken":true + }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, + "MeetingHostId":{ + "shape":"ExternalUserIdType", + "documentation":"

Reserved.

" + }, + "MediaRegion":{ + "shape":"String", + "documentation":"

The Region in which to create the meeting. Default: us-east-1.

Available values: ap-northeast-1, ap-southeast-1, ap-southeast-2, ca-central-1, eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-east-1, us-east-2, us-west-1, us-west-2.

" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" + }, + "NotificationsConfiguration":{"shape":"MeetingNotificationConfiguration"}, + "Attendees":{ + "shape":"CreateMeetingWithAttendeesRequestItemList", + "documentation":"

The request containing the attendees to create.

" + } + } + }, + "CreateMeetingWithAttendeesRequestItemList":{ + "type":"list", + "member":{"shape":"CreateAttendeeRequestItem"}, + "max":5, + "min":1 + }, + "CreateMeetingWithAttendeesResponse":{ + "type":"structure", + "members":{ + "Meeting":{"shape":"Meeting"}, + "Attendees":{ + "shape":"AttendeeList", + "documentation":"

The attendee information, including attendee IDs and join tokens.

" + }, + "Errors":{ + "shape":"BatchCreateAttendeeErrorList", + "documentation":"

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + } + } + }, "CreatePhoneNumberOrderRequest":{ "type":"structure", "required":[ @@ -2595,6 +3209,59 @@ } } }, + "CreateProxySessionRequest":{ + "type":"structure", + "required":[ + "ParticipantPhoneNumbers", + "Capabilities", + "VoiceConnectorId" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "ParticipantPhoneNumbers":{ + "shape":"ParticipantPhoneNumberList", + "documentation":"

The participant phone numbers.

" + }, + "Name":{ + "shape":"ProxySessionNameString", + "documentation":"

The name of the proxy session.

" + }, + "ExpiryMinutes":{ + "shape":"PositiveInteger", + "documentation":"

The number of minutes allowed for the proxy session.

" + }, + "Capabilities":{ + "shape":"CapabilityList", + "documentation":"

The proxy session capabilities.

" + }, + "NumberSelectionBehavior":{ + "shape":"NumberSelectionBehavior", + "documentation":"

The preference for proxy phone number reuse, or stickiness, between the same participants across sessions.

" + }, + "GeoMatchLevel":{ + "shape":"GeoMatchLevel", + "documentation":"

The preference for matching the country or area code of the proxy phone number with that of the first participant.

" + }, + "GeoMatchParams":{ + "shape":"GeoMatchParams", + "documentation":"

The country and area code for the proxy phone number.

" + } + } + }, + "CreateProxySessionResponse":{ + "type":"structure", + "members":{ + "ProxySession":{ + "shape":"ProxySession", + "documentation":"

The proxy session details.

" + } + } + }, "CreateRoomMembershipRequest":{ "type":"structure", "required":[ @@ -2768,6 +3435,32 @@ "type":"list", "member":{"shape":"Credential"} }, + "DNISEmergencyCallingConfiguration":{ + "type":"structure", + "required":[ + "EmergencyPhoneNumber", + "CallingCountry" + ], + "members":{ + "EmergencyPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The DNIS phone number to route emergency calls to, in E.164 format.

" + }, + "TestPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The DNIS phone number to route test emergency calls to, in E.164 format.

" + }, + "CallingCountry":{ + "shape":"Alpha2CountryCode", + "documentation":"

The country from which emergency calls are allowed, in ISO 3166-1 alpha-2 format.

" + } + }, + "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details associated with an Amazon Chime Voice Connector's emergency calling configuration.

" + }, + "DNISEmergencyCallingConfigurationList":{ + "type":"list", + "member":{"shape":"DNISEmergencyCallingConfiguration"} + }, "DataRetentionInHours":{ "type":"integer", "min":0 @@ -2855,6 +3548,27 @@ } } }, + "DeleteProxySessionRequest":{ + "type":"structure", + "required":[ + "VoiceConnectorId", + "ProxySessionId" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

", + "location":"uri", + "locationName":"proxySessionId" + } + } + }, "DeleteRoomMembershipRequest":{ "type":"structure", "required":[ @@ -2904,6 +3618,18 @@ } } }, + "DeleteVoiceConnectorEmergencyCallingConfigurationRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + } + } + }, "DeleteVoiceConnectorGroupRequest":{ "type":"structure", "required":["VoiceConnectorGroupId"], @@ -2928,6 +3654,18 @@ } } }, + "DeleteVoiceConnectorProxyRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + } + } + }, "DeleteVoiceConnectorRequest":{ "type":"structure", "required":["VoiceConnectorId"], @@ -2954,7 +3692,10 @@ }, "DeleteVoiceConnectorTerminationCredentialsRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "Usernames", + "VoiceConnectorId" + ], "members":{ "VoiceConnectorId":{ "shape":"NonEmptyString", @@ -3008,7 +3749,10 @@ }, "DisassociatePhoneNumbersFromVoiceConnectorGroupRequest":{ "type":"structure", - "required":["VoiceConnectorGroupId"], + "required":[ + "VoiceConnectorGroupId", + "E164PhoneNumbers" + ], "members":{ "VoiceConnectorGroupId":{ "shape":"NonEmptyString", @@ -3033,7 +3777,10 @@ }, "DisassociatePhoneNumbersFromVoiceConnectorRequest":{ "type":"structure", - "required":["VoiceConnectorId"], + "required":[ + "VoiceConnectorId", + "E164PhoneNumbers" + ], "members":{ "VoiceConnectorId":{ "shape":"NonEmptyString", @@ -3102,6 +3849,16 @@ "Failed" ] }, + "EmergencyCallingConfiguration":{ + "type":"structure", + "members":{ + "DNIS":{ + "shape":"DNISEmergencyCallingConfigurationList", + "documentation":"

The Dialed Number Identification Service (DNIS) emergency calling configuration details.

" + } + }, + "documentation":"

The emergency calling configuration details associated with an Amazon Chime Voice Connector.

" + }, "ErrorCode":{ "type":"string", "enum":[ @@ -3139,6 +3896,12 @@ }, "documentation":"

The configuration that allows a bot to receive outgoing events. Can be either an HTTPS endpoint or a Lambda function ARN.

" }, + "ExternalMeetingIdType":{ + "type":"string", + "max":64, + "min":2, + "sensitive":true + }, "ExternalUserIdType":{ "type":"string", "max":64, @@ -3155,6 +3918,31 @@ "error":{"httpStatusCode":403}, "exception":true }, + "GeoMatchLevel":{ + "type":"string", + "enum":[ + "Country", + "AreaCode" + ] + }, + "GeoMatchParams":{ + "type":"structure", + "required":[ + "Country", + "AreaCode" + ], + "members":{ + "Country":{ + "shape":"Country", + "documentation":"

The country.

" + }, + "AreaCode":{ + "shape":"AreaCode", + "documentation":"

The area code.

" + } + }, + "documentation":"

The country and area code for a proxy phone number in a proxy phone session.

" + }, "GetAccountRequest":{ "type":"structure", "required":["AccountId"], @@ -3376,6 +4164,61 @@ } } }, + "GetProxySessionRequest":{ + "type":"structure", + "required":[ + "VoiceConnectorId", + "ProxySessionId" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

", + "location":"uri", + "locationName":"proxySessionId" + } + } + }, + "GetProxySessionResponse":{ + "type":"structure", + "members":{ + "ProxySession":{ + "shape":"ProxySession", + "documentation":"

The proxy session details.

" + } + } + }, + "GetRetentionSettingsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + } + } + }, + "GetRetentionSettingsResponse":{ + "type":"structure", + "members":{ + "RetentionSettings":{ + "shape":"RetentionSettings", + "documentation":"

The retention settings.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The timestamp representing the time at which the specified items are permanently deleted, in ISO 8601 format.

" + } + } + }, "GetRoomRequest":{ "type":"structure", "required":[ @@ -3466,6 +4309,27 @@ } } }, + "GetVoiceConnectorEmergencyCallingConfigurationRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + } + } + }, + "GetVoiceConnectorEmergencyCallingConfigurationResponse":{ + "type":"structure", + "members":{ + "EmergencyCallingConfiguration":{ + "shape":"EmergencyCallingConfiguration", + "documentation":"

The emergency calling configuration details.

" + } + } + }, "GetVoiceConnectorGroupRequest":{ "type":"structure", "required":["VoiceConnectorGroupId"], @@ -3529,6 +4393,27 @@ } } }, + "GetVoiceConnectorProxyRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + } + } + }, + "GetVoiceConnectorProxyResponse":{ + "type":"structure", + "members":{ + "Proxy":{ + "shape":"Proxy", + "documentation":"

The proxy configuration details.

" + } + } + }, "GetVoiceConnectorRequest":{ "type":"structure", "required":["VoiceConnectorId"], @@ -3617,6 +4502,7 @@ "type":"string", "pattern":"[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}" }, + "Integer":{"type":"integer"}, "Invite":{ "type":"structure", "members":{ @@ -3748,6 +4634,36 @@ } } }, + "ListAttendeeTagsRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "AttendeeId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", + "location":"uri", + "locationName":"attendeeId" + } + } + }, + "ListAttendeeTagsResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" + } + } + }, "ListAttendeesRequest":{ "type":"structure", "required":["MeetingId"], @@ -3822,6 +4738,27 @@ } } }, + "ListMeetingTagsRequest":{ + "type":"structure", + "required":["MeetingId"], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + } + } + }, + "ListMeetingTagsResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" + } + } + }, "ListMeetingsRequest":{ "type":"structure", "members":{ @@ -3936,6 +4873,49 @@ } } }, + "ListProxySessionsRequest":{ + "type":"structure", + "required":["VoiceConnectorId"], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "Status":{ + "shape":"ProxySessionStatus", + "documentation":"

The proxy session status.

", + "location":"querystring", + "locationName":"status" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"ResultMax", + "documentation":"

The maximum number of results to return in a single call.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListProxySessionsResponse":{ + "type":"structure", + "members":{ + "ProxySessions":{ + "shape":"ProxySessions", + "documentation":"

The proxy session details.

" + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

The token to use to retrieve the next page of results.

" + } + } + }, "ListRoomMembershipsRequest":{ "type":"structure", "required":[ @@ -4025,6 +5005,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"Arn", + "documentation":"

The resource ARN.

", + "location":"querystring", + "locationName":"arn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key-value pairs.

" + } + } + }, "ListUsersRequest":{ "type":"structure", "required":["AccountId"], @@ -4232,6 +5233,10 @@ "shape":"GuidString", "documentation":"

The Amazon Chime SDK meeting ID.

" }, + "ExternalMeetingId":{ + "shape":"ExternalMeetingIdType", + "documentation":"

The external meeting ID.

" + }, "MediaPlacement":{ "shape":"MediaPlacement", "documentation":"

The media placement for the meeting.

" @@ -4259,7 +5264,19 @@ "documentation":"

The SQS queue ARN.

" } }, - "documentation":"

The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur.

" + "documentation":"

The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur. The Amazon Chime SDK supports resource targets located in the US East (N. Virginia) AWS Region (us-east-1).

" + }, + "MeetingTagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "MeetingTagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 }, "Member":{ "type":"structure", @@ -4336,10 +5353,20 @@ "member":{"shape":"MembershipItem"}, "max":50 }, + "NextTokenString":{ + "type":"string", + "max":65535 + }, "NonEmptyString":{ "type":"string", "pattern":".*\\S.*" }, + "NonEmptyString128":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, "NonEmptyStringList":{ "type":"list", "member":{"shape":"String"}, @@ -4355,7 +5382,22 @@ "error":{"httpStatusCode":404}, "exception":true }, + "NotificationTarget":{ + "type":"string", + "enum":[ + "EventBridge", + "SNS", + "SQS" + ] + }, "NullableBoolean":{"type":"boolean"}, + "NumberSelectionBehavior":{ + "type":"string", + "enum":[ + "PreferSticky", + "AvoidSticky" + ] + }, "OrderedPhoneNumber":{ "type":"structure", "members":{ @@ -4443,6 +5485,30 @@ "max":100, "min":1 }, + "Participant":{ + "type":"structure", + "members":{ + "PhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The participant's phone number.

" + }, + "ProxyPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The participant's proxy phone number.

" + } + }, + "documentation":"

The phone number and proxy phone number for a participant in an Amazon Chime Voice Connector proxy session.

" + }, + "ParticipantPhoneNumberList":{ + "type":"list", + "member":{"shape":"E164PhoneNumber"}, + "max":2, + "min":2 + }, + "Participants":{ + "type":"list", + "member":{"shape":"Participant"} + }, "PhoneNumber":{ "type":"structure", "members":{ @@ -4664,49 +5730,210 @@ "max":65535, "min":0 }, + "PositiveInteger":{ + "type":"integer", + "min":1 + }, "ProfileServiceMaxResults":{ "type":"integer", "max":200, "min":1 }, - "PutEventsConfigurationRequest":{ + "Proxy":{ "type":"structure", - "required":[ - "AccountId", - "BotId" - ], "members":{ - "AccountId":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Chime account ID.

", - "location":"uri", - "locationName":"accountId" + "DefaultSessionExpiryMinutes":{ + "shape":"Integer", + "documentation":"

The default number of minutes allowed for proxy sessions.

" }, - "BotId":{ - "shape":"NonEmptyString", - "documentation":"

The bot ID.

", - "location":"uri", - "locationName":"botId" + "Disabled":{ + "shape":"Boolean", + "documentation":"

When true, stops proxy sessions from being created on the specified Amazon Chime Voice Connector.

" }, - "OutboundEventsHTTPSEndpoint":{ - "shape":"SensitiveString", - "documentation":"

HTTPS endpoint that allows the bot to receive outgoing events.

" + "FallBackPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The phone number to route calls to after a proxy session expires.

" }, - "LambdaFunctionArn":{ - "shape":"SensitiveString", - "documentation":"

Lambda function ARN that allows the bot to receive outgoing events.

" + "PhoneNumberCountries":{ + "shape":"StringList", + "documentation":"

The countries for proxy phone numbers to be selected from.

" } - } + }, + "documentation":"

The proxy configuration for an Amazon Chime Voice Connector.

" }, - "PutEventsConfigurationResponse":{ + "ProxySession":{ "type":"structure", "members":{ - "EventsConfiguration":{"shape":"EventsConfiguration"} - } - }, - "PutVoiceConnectorLoggingConfigurationRequest":{ - "type":"structure", - "required":[ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

" + }, + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

" + }, + "Name":{ + "shape":"String128", + "documentation":"

The name of the proxy session.

" + }, + "Status":{ + "shape":"ProxySessionStatus", + "documentation":"

The status of the proxy session.

" + }, + "ExpiryMinutes":{ + "shape":"PositiveInteger", + "documentation":"

The number of minutes allowed for the proxy session.

" + }, + "Capabilities":{ + "shape":"CapabilityList", + "documentation":"

The proxy session capabilities.

" + }, + "CreatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The created timestamp, in ISO 8601 format.

" + }, + "UpdatedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The updated timestamp, in ISO 8601 format.

" + }, + "EndedTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The ended timestamp, in ISO 8601 format.

" + }, + "Participants":{ + "shape":"Participants", + "documentation":"

The proxy session participants.

" + }, + "NumberSelectionBehavior":{ + "shape":"NumberSelectionBehavior", + "documentation":"

The preference for proxy phone number reuse, or stickiness, between the same participants across sessions.

" + }, + "GeoMatchLevel":{ + "shape":"GeoMatchLevel", + "documentation":"

The preference for matching the country or area code of the proxy phone number with that of the first participant.

" + }, + "GeoMatchParams":{ + "shape":"GeoMatchParams", + "documentation":"

The country and area code for the proxy phone number.

" + } + }, + "documentation":"

The proxy session for an Amazon Chime Voice Connector.

" + }, + "ProxySessionNameString":{ + "type":"string", + "pattern":"^$|^[a-zA-Z0-9 ]{0,30}$", + "sensitive":true + }, + "ProxySessionStatus":{ + "type":"string", + "enum":[ + "Open", + "InProgress", + "Closed" + ] + }, + "ProxySessions":{ + "type":"list", + "member":{"shape":"ProxySession"} + }, + "PutEventsConfigurationRequest":{ + "type":"structure", + "required":[ + "AccountId", + "BotId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "BotId":{ + "shape":"NonEmptyString", + "documentation":"

The bot ID.

", + "location":"uri", + "locationName":"botId" + }, + "OutboundEventsHTTPSEndpoint":{ + "shape":"SensitiveString", + "documentation":"

HTTPS endpoint that allows the bot to receive outgoing events.

" + }, + "LambdaFunctionArn":{ + "shape":"SensitiveString", + "documentation":"

Lambda function ARN that allows the bot to receive outgoing events.

" + } + } + }, + "PutEventsConfigurationResponse":{ + "type":"structure", + "members":{ + "EventsConfiguration":{"shape":"EventsConfiguration"} + } + }, + "PutRetentionSettingsRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RetentionSettings" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "RetentionSettings":{ + "shape":"RetentionSettings", + "documentation":"

The retention settings.

" + } + } + }, + "PutRetentionSettingsResponse":{ + "type":"structure", + "members":{ + "RetentionSettings":{ + "shape":"RetentionSettings", + "documentation":"

The retention settings.

" + }, + "InitiateDeletionTimestamp":{ + "shape":"Iso8601Timestamp", + "documentation":"

The timestamp representing the time at which the specified items are permanently deleted, in ISO 8601 format.

" + } + } + }, + "PutVoiceConnectorEmergencyCallingConfigurationRequest":{ + "type":"structure", + "required":[ + "VoiceConnectorId", + "EmergencyCallingConfiguration" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime Voice Connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "EmergencyCallingConfiguration":{ + "shape":"EmergencyCallingConfiguration", + "documentation":"

The emergency calling configuration details.

" + } + } + }, + "PutVoiceConnectorEmergencyCallingConfigurationResponse":{ + "type":"structure", + "members":{ + "EmergencyCallingConfiguration":{ + "shape":"EmergencyCallingConfiguration", + "documentation":"

The emergency calling configuration details.

" + } + } + }, + "PutVoiceConnectorLoggingConfigurationRequest":{ + "type":"structure", + "required":[ "VoiceConnectorId", "LoggingConfiguration" ], @@ -4760,6 +5987,47 @@ } } }, + "PutVoiceConnectorProxyRequest":{ + "type":"structure", + "required":[ + "DefaultSessionExpiryMinutes", + "PhoneNumberPoolCountries", + "VoiceConnectorId" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "DefaultSessionExpiryMinutes":{ + "shape":"Integer", + "documentation":"

The default number of minutes allowed for proxy sessions.

" + }, + "PhoneNumberPoolCountries":{ + "shape":"CountryList", + "documentation":"

The countries for proxy phone numbers to be selected from.

" + }, + "FallBackPhoneNumber":{ + "shape":"E164PhoneNumber", + "documentation":"

The phone number to route calls to after a proxy session expires.

" + }, + "Disabled":{ + "shape":"Boolean", + "documentation":"

When true, stops proxy sessions from being created on the specified Amazon Chime Voice Connector.

" + } + } + }, + "PutVoiceConnectorProxyResponse":{ + "type":"structure", + "members":{ + "Proxy":{ + "shape":"Proxy", + "documentation":"

The proxy configuration details.

" + } + } + }, "PutVoiceConnectorStreamingConfigurationRequest":{ "type":"structure", "required":[ @@ -4832,6 +6100,72 @@ } } }, + "RedactConversationMessageRequest":{ + "type":"structure", + "required":[ + "AccountId", + "ConversationId", + "MessageId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "ConversationId":{ + "shape":"NonEmptyString", + "documentation":"

The conversation ID.

", + "location":"uri", + "locationName":"conversationId" + }, + "MessageId":{ + "shape":"NonEmptyString", + "documentation":"

The message ID.

", + "location":"uri", + "locationName":"messageId" + } + } + }, + "RedactConversationMessageResponse":{ + "type":"structure", + "members":{ + } + }, + "RedactRoomMessageRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MessageId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Chime account ID.

", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "documentation":"

The room ID.

", + "location":"uri", + "locationName":"roomId" + }, + "MessageId":{ + "shape":"NonEmptyString", + "documentation":"

The message ID.

", + "location":"uri", + "locationName":"messageId" + } + } + }, + "RedactRoomMessageResponse":{ + "type":"structure", + "members":{ + } + }, "RegenerateSecurityTokenRequest":{ "type":"structure", "required":[ @@ -4933,6 +6267,25 @@ "max":99, "min":1 }, + "RetentionDays":{ + "type":"integer", + "max":5475, + "min":1 + }, + "RetentionSettings":{ + "type":"structure", + "members":{ + "RoomRetentionSettings":{ + "shape":"RoomRetentionSettings", + "documentation":"

The chat room retention settings.

" + }, + "ConversationRetentionSettings":{ + "shape":"ConversationRetentionSettings", + "documentation":"

The chat conversation retention settings.

" + } + }, + "documentation":"

The retention settings for an Amazon Chime Enterprise account that determine how long to retain items such as chat room messages and chat conversation messages.

" + }, "Room":{ "type":"structure", "members":{ @@ -5001,6 +6354,16 @@ "Member" ] }, + "RoomRetentionSettings":{ + "type":"structure", + "members":{ + "RetentionDays":{ + "shape":"RetentionDays", + "documentation":"

The number of days for which to retain chat room messages.

" + } + }, + "documentation":"

The retention settings that determine how long to retain chat room messages for an Amazon Chime Enterprise account.

" + }, "SearchAvailablePhoneNumbersRequest":{ "type":"structure", "members":{ @@ -5112,15 +6475,144 @@ "Disabled":{ "shape":"Boolean", "documentation":"

When true, media streaming to Amazon Kinesis is turned off.

" + }, + "StreamingNotificationTargets":{ + "shape":"StreamingNotificationTargetList", + "documentation":"

The streaming notification targets.

" } }, "documentation":"

The streaming configuration associated with an Amazon Chime Voice Connector. Specifies whether media streaming is enabled for sending to Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, in hours.

" }, + "StreamingNotificationTarget":{ + "type":"structure", + "required":["NotificationTarget"], + "members":{ + "NotificationTarget":{ + "shape":"NotificationTarget", + "documentation":"

The streaming notification target.

" + } + }, + "documentation":"

The targeted recipient for a streaming configuration notification.

" + }, + "StreamingNotificationTargetList":{ + "type":"list", + "member":{"shape":"StreamingNotificationTarget"}, + "max":3, + "min":1 + }, "String":{"type":"string"}, + "String128":{ + "type":"string", + "max":128 + }, "StringList":{ "type":"list", "member":{"shape":"String"} }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Describes a tag applied to a resource.

" + }, + "TagAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "AttendeeId", + "Tags" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", + "location":"uri", + "locationName":"attendeeId" + }, + "Tags":{ + "shape":"AttendeeTagList", + "documentation":"

The tag key-value pairs.

" + } + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":50, + "min":1 + }, + "TagMeetingRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "Tags" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "Tags":{ + "shape":"MeetingTagList", + "documentation":"

The tag key-value pairs.

" + } + } + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"Arn", + "documentation":"

The resource ARN.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tag key-value pairs.

" + } + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "sensitive":true + }, "TelephonySettings":{ "type":"structure", "required":[ @@ -5220,6 +6712,68 @@ "error":{"httpStatusCode":422}, "exception":true }, + "UntagAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "TagKeys", + "AttendeeId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK attendee ID.

", + "location":"uri", + "locationName":"attendeeId" + }, + "TagKeys":{ + "shape":"AttendeeTagKeyList", + "documentation":"

The tag keys.

" + } + } + }, + "UntagMeetingRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "TagKeys" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "documentation":"

The Amazon Chime SDK meeting ID.

", + "location":"uri", + "locationName":"meetingId" + }, + "TagKeys":{ + "shape":"MeetingTagKeyList", + "documentation":"

The tag keys.

" + } + } + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"Arn", + "documentation":"

The resource ARN.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys.

" + } + } + }, "UpdateAccountRequest":{ "type":"structure", "required":["AccountId"], @@ -5382,6 +6936,45 @@ } } }, + "UpdateProxySessionRequest":{ + "type":"structure", + "required":[ + "Capabilities", + "VoiceConnectorId", + "ProxySessionId" + ], + "members":{ + "VoiceConnectorId":{ + "shape":"NonEmptyString128", + "documentation":"

The Amazon Chime voice connector ID.

", + "location":"uri", + "locationName":"voiceConnectorId" + }, + "ProxySessionId":{ + "shape":"NonEmptyString128", + "documentation":"

The proxy session ID.

", + "location":"uri", + "locationName":"proxySessionId" + }, + "Capabilities":{ + "shape":"CapabilityList", + "documentation":"

The proxy session capabilities.

" + }, + "ExpiryMinutes":{ + "shape":"PositiveInteger", + "documentation":"

The number of minutes allowed for the proxy session.

" + } + } + }, + "UpdateProxySessionResponse":{ + "type":"structure", + "members":{ + "ProxySession":{ + "shape":"ProxySession", + "documentation":"

The proxy session details.

" + } + } + }, "UpdateRoomMembershipRequest":{ "type":"structure", "required":[ diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index dd95c579b13e..802e2164879b 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 cloud9 diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index b8a367d744ef..0a41b8cea137 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 35928b862bb1..ff3595a87c49 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/paginators-1.json b/services/cloudformation/src/main/resources/codegen-resources/paginators-1.json index fb1c4ff2699d..668a09eb347b 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/paginators-1.json +++ b/services/cloudformation/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,10 @@ { "pagination": { + "DescribeAccountLimits": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "AccountLimits" + }, "DescribeStackEvents": { "input_token": "NextToken", "output_token": "NextToken", @@ -18,6 +23,11 @@ "output_token": "NextToken", "result_key": "Stacks" }, + "ListChangeSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Summaries" + }, "ListExports": { "input_token": "NextToken", "output_token": "NextToken", @@ -28,11 +38,35 @@ "output_token": "NextToken", "result_key": "Imports" }, + 
"ListStackInstances": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summaries" + }, "ListStackResources": { "input_token": "NextToken", "output_token": "NextToken", "result_key": "StackResourceSummaries" }, + "ListStackSetOperationResults": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summaries" + }, + "ListStackSetOperations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summaries" + }, + "ListStackSets": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summaries" + }, "ListStacks": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index e82b51e984d5..48e7d3e601c3 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -95,7 +95,7 @@ {"shape":"InvalidOperationException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region. You must specify at least one value for either Accounts or DeploymentTargets, and you must specify at least one value for Regions.

" + "documentation":"

Creates stack instances for the specified accounts, within the specified Regions. A stack instance refers to a stack in a specific account and Region. You must specify at least one value for either Accounts or DeploymentTargets, and you must specify at least one value for Regions.

" }, "CreateStackSet":{ "name":"CreateStackSet", @@ -161,7 +161,7 @@ {"shape":"StaleRequestException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Deletes stack instances for the specified accounts, in the specified regions.

" + "documentation":"

Deletes stack instances for the specified accounts, in the specified Regions.

" }, "DeleteStackSet":{ "name":"DeleteStackSet", @@ -268,7 +268,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

Returns the stack instance that's associated with the specified stack set, AWS account, and region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

" + "documentation":"

Returns the stack instance that's associated with the specified stack set, AWS account, and Region.

For a list of stack instances that are associated with a specific stack set, use ListStackInstances.

" }, "DescribeStackResource":{ "name":"DescribeStackResource", @@ -535,7 +535,7 @@ "shape":"ListExportsOutput", "resultWrapper":"ListExportsResult" }, - "documentation":"

Lists all exported output values in the account and region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see AWS CloudFormation Export Stack Output Values.

" + "documentation":"

Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see AWS CloudFormation Export Stack Output Values.

" }, "ListImports":{ "name":"ListImports", @@ -564,7 +564,7 @@ "errors":[ {"shape":"StackSetNotFoundException"} ], - "documentation":"

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or region.

" + "documentation":"

Returns summary information about stack instances that are associated with the specified stack set. You can filter for stack instances that are associated with a specific AWS account name or Region, or that have a specific status.

" }, "ListStackResources":{ "name":"ListStackResources", @@ -721,7 +721,7 @@ "errors":[ {"shape":"CFNRegistryException"} ], - "documentation":"

Registers a type with the CloudFormation service. Registering a type makes it available for use in CloudFormation templates in your AWS account, and includes:

  • Validating the resource schema

  • Determining which handlers have been specified for the resource

  • Making the resource type available for use in your account

For more information on how to develop types and ready them for registeration, see Creating Resource Providers in the CloudFormation CLI User Guide.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

", + "documentation":"

Registers a type with the CloudFormation service. Registering a type makes it available for use in CloudFormation templates in your AWS account, and includes:

  • Validating the resource schema

  • Determining which handlers have been specified for the resource

  • Making the resource type available for use in your account

For more information on how to develop types and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource type versions registered at a time. This maximum is per account and per region. Use DeregisterType to deregister specific resource type versions if necessary.

Once you have initiated a registration request using RegisterType , you can use DescribeTypeRegistration to monitor the progress of the registration request.

", "idempotent":true }, "SetStackPolicy":{ @@ -814,7 +814,7 @@ {"shape":"StaleRequestException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Updates the parameter values for stack instances for the specified accounts, within the specified regions. A stack instance refers to a stack in a specific account and region.

You can only update stack instances in regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.

During stack set updates, any parameters overridden for a stack instance are not updated, but retain their overridden value.

You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" + "documentation":"

Updates the parameter values for stack instances for the specified accounts, within the specified Regions. A stack instance refers to a stack in a specific account and Region.

You can only update stack instances in Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.

During stack set updates, any parameters overridden for a stack instance are not updated, but retain their overridden value.

You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" }, "UpdateStackSet":{ "name":"UpdateStackSet", @@ -835,7 +835,7 @@ {"shape":"InvalidOperationException"}, {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

Updates the stack set, and associated stack instances in the specified accounts and regions.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

" + "documentation":"

Updates the stack set, and associated stack instances in the specified accounts and Regions.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

" }, "UpdateTerminationProtection":{ "name":"UpdateTerminationProtection", @@ -874,14 +874,14 @@ "members":{ "Status":{ "shape":"AccountGateStatus", - "documentation":"

The status of the account gate function.

  • SUCCEEDED: The account gate function has determined that the account and region passes any requirements for a stack set operation to occur. AWS CloudFormation proceeds with the stack operation in that account and region.

  • FAILED: The account gate function has determined that the account and region does not meet the requirements for a stack set operation to occur. AWS CloudFormation cancels the stack set operation in that account and region, and sets the stack set operation result status for that account and region to FAILED.

  • SKIPPED: AWS CloudFormation has skipped calling the account gate function for this account and region, for one of the following reasons:

    • An account gate function has not been specified for the account and region. AWS CloudFormation proceeds with the stack set operation in this account and region.

    • The AWSCloudFormationStackSetExecutionRole of the stack set adminstration account lacks permissions to invoke the function. AWS CloudFormation proceeds with the stack set operation in this account and region.

    • Either no action is necessary, or no action is possible, on the stack. AWS CloudFormation skips the stack set operation in this account and region.

" + "documentation":"

The status of the account gate function.

  • SUCCEEDED: The account gate function has determined that the account and Region passes any requirements for a stack set operation to occur. AWS CloudFormation proceeds with the stack operation in that account and Region.

  • FAILED: The account gate function has determined that the account and Region does not meet the requirements for a stack set operation to occur. AWS CloudFormation cancels the stack set operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

  • SKIPPED: AWS CloudFormation has skipped calling the account gate function for this account and Region, for one of the following reasons:

    • An account gate function has not been specified for the account and Region. AWS CloudFormation proceeds with the stack set operation in this account and Region.

    • The AWSCloudFormationStackSetExecutionRole of the stack set administration account lacks permissions to invoke the function. AWS CloudFormation proceeds with the stack set operation in this account and Region.

    • Either no action is necessary, or no action is possible, on the stack. AWS CloudFormation skips the stack set operation in this account and Region.

" }, "StatusReason":{ "shape":"AccountGateStatusReason", - "documentation":"

The reason for the account gate status assigned to this account and region for the stack set operation.

" + "documentation":"

The reason for the account gate status assigned to this account and Region for the stack set operation.

" } }, - "documentation":"

Structure that contains the results of the account gate function which AWS CloudFormation invokes, if present, before proceeding with a stack set operation in an account and region.

For each account and region, AWS CloudFormation lets you specify a Lamdba function that encapsulates any requirements that must be met before CloudFormation can proceed with a stack set operation in that account and region. CloudFormation invokes the function each time a stack set operation is requested for that account and region; if the function returns FAILED, CloudFormation cancels the operation in that account and region, and sets the stack set operation result status for that account and region to FAILED.

For more information, see Configuring a target account gate.

" + "documentation":"

Structure that contains the results of the account gate function which AWS CloudFormation invokes, if present, before proceeding with a stack set operation in an account and Region.

For each account and Region, AWS CloudFormation lets you specify a Lambda function that encapsulates any requirements that must be met before CloudFormation can proceed with a stack set operation in that account and Region. CloudFormation invokes the function each time a stack set operation is requested for that account and Region; if the function returns FAILED, CloudFormation cancels the operation in that account and Region, and sets the stack set operation result status for that account and Region to FAILED.

For more information, see Configuring a target account gate.

" }, "AccountGateStatus":{ "type":"string", @@ -1268,7 +1268,7 @@ "members":{ "StackName":{ "shape":"StackName", - "documentation":"

The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.

A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

" + "documentation":"

The name that is associated with the stack. The name must be unique in the Region in which you are creating the stack.

A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

" }, "TemplateBody":{ "shape":"TemplateBody", @@ -1320,7 +1320,7 @@ }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "documentation":"

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" }, "Tags":{ "shape":"Tags", @@ -1350,7 +1350,7 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts that you want to create stack instances in the specified region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more AWS accounts that you want to create stack instances in the specified Region(s) for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", @@ -1358,11 +1358,11 @@ }, "Regions":{ "shape":"RegionList", - "documentation":"

The names of one or more regions where you want to create stack instances using the specified AWS account(s).

" + "documentation":"

The names of one or more Regions where you want to create stack instances using the specified AWS account(s).

" }, "ParameterOverrides":{ "shape":"Parameters", - "documentation":"

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameter back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

" + "documentation":"

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameter back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", @@ -1400,7 +1400,7 @@ "members":{ "StackSetName":{ "shape":"StackSetName", - "documentation":"

The name to associate with the stack set. The name must be unique in the region where you create your stack set.

A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters.

" + "documentation":"

The name to associate with the stack set. The name must be unique in the Region where you create your stack set.

A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters.

" }, "Description":{ "shape":"Description", @@ -1440,7 +1440,7 @@ }, "AutoDeployment":{ "shape":"AutoDeployment", - "documentation":"

Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel is SERVICE_MANAGED.

If you specify AutoDeployment, do not specify DeploymentTargets or Regions.

" + "documentation":"

Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel is SERVICE_MANAGED.

" }, "ClientRequestToken":{ "shape":"ClientRequestToken", @@ -1529,7 +1529,7 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of the AWS accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of the AWS accounts that you want to delete stack instances for.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", @@ -1537,7 +1537,7 @@ }, "Regions":{ "shape":"RegionList", - "documentation":"

The regions where you want to delete stack set instances.

" + "documentation":"

The Regions where you want to delete stack set instances.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", @@ -1588,10 +1588,10 @@ }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", - "documentation":"

The organization root ID or organizational unit (OUs) IDs to which StackSets deploys.

" + "documentation":"

The organization root ID or organizational unit (OU) IDs to which StackSets deploys.

" } }, - "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" + "documentation":"

[Service-managed permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization master account, even if the master account is in your organization or in an OU in your organization.

For update operations, you can specify either Accounts or OrganizationalUnitIds. For create and delete operations, specify OrganizationalUnitIds.

" }, "DeprecatedStatus":{ "type":"string", @@ -1834,7 +1834,7 @@ }, "StackInstanceRegion":{ "shape":"Region", - "documentation":"

The name of a region that's associated with this stack instance.

" + "documentation":"

The name of a Region that's associated with this stack instance.

" } } }, @@ -2052,6 +2052,10 @@ "shape":"TypeVersionId", "documentation":"

The ID of the default version of the type. The default version is used when the type version is not specified.

To set the default version of a type, use SetTypeDefaultVersion .

" }, + "IsDefaultVersion":{ + "shape":"IsDefaultVersion", + "documentation":"

Whether the specified type version is set as the default version.

" + }, "Description":{ "shape":"Description", "documentation":"

The description of the registered type.

" @@ -2540,6 +2544,7 @@ }, "exception":true }, + "IsDefaultVersion":{"type":"boolean"}, "Key":{"type":"string"}, "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ @@ -2650,13 +2655,17 @@ "shape":"MaxResults", "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" }, + "Filters":{ + "shape":"StackInstanceFilters", + "documentation":"

The status that stack instances are filtered by.

" + }, "StackInstanceAccount":{ "shape":"Account", "documentation":"

The name of the AWS account that you want to list stack instances for.

" }, "StackInstanceRegion":{ "shape":"Region", - "documentation":"

The name of the region where you want to list stack instances.

" + "documentation":"

The name of the Region where you want to list stack instances.

" } } }, @@ -2732,7 +2741,7 @@ "members":{ "Summaries":{ "shape":"StackSetOperationResultSummaries", - "documentation":"

A list of StackSetOperationResultSummary structures that contain information about the specified operation results, for accounts and regions that are included in the operation.

" + "documentation":"

A list of StackSetOperationResultSummary structures that contain information about the specified operation results, for accounts and Regions that are included in the operation.

" }, "NextToken":{ "shape":"NextToken", @@ -3360,7 +3369,7 @@ }, "ExecutionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM execution role to use to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource provider. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource provider handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource provider handler, thereby supplying your resource provider with the appropriate credentials.

" }, "ClientRequestToken":{ "shape":"RequestToken", @@ -3603,7 +3612,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to import into your stack, such as AWS::S3::Bucket.

" + "documentation":"

The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resources that support import operations in the AWS CloudFormation User Guide.

" }, "LogicalResourceId":{ "shape":"LogicalResourceId", @@ -3716,7 +3725,7 @@ }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "documentation":"

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" } }, "documentation":"

The input for the SetStackPolicy action.

" @@ -3997,11 +4006,11 @@ }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS region that the stack instance is associated with.

" + "documentation":"

The name of the AWS Region that the stack instance is associated with.

" }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4015,13 +4024,17 @@ "shape":"StackInstanceStatus", "documentation":"

The status of the stack instance, in terms of its synchronization with its associated stack set.

  • INOPERABLE: A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. You might need to perform a DeleteStackInstances operation, with RetainStacks set to true, to delete the stack instance, and then delete the stack manually.

  • OUTDATED: The stack isn't currently up to date with the stack set because:

    • The associated stack failed during a CreateStackSet or UpdateStackSet operation.

    • The stack was part of a CreateStackSet or UpdateStackSet operation that failed or was stopped before the stack was created or updated.

  • CURRENT: The stack is currently up to date with the stack set.

" }, + "StackInstanceStatus":{ + "shape":"StackInstanceComprehensiveStatus", + "documentation":"

The detailed status of the stack instance.

" + }, "StatusReason":{ "shape":"Reason", "documentation":"

The explanation for the specific status code that is assigned to this stack instance.

" }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) ID that the stack instance is associated with.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4032,7 +4045,56 @@ "documentation":"

Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL for any stack instance on which drift detection has not yet been performed.

" } }, - "documentation":"

An AWS CloudFormation stack, in a specific account and region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

" + "documentation":"

An AWS CloudFormation stack, in a specific account and Region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given Region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, as well as the ID of the actual stack and the stack status.

" + }, + "StackInstanceComprehensiveStatus":{ + "type":"structure", + "members":{ + "DetailedStatus":{ + "shape":"StackInstanceDetailedStatus", + "documentation":"
  • CANCELLED: The operation in the specified account and Region has been cancelled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.

  • FAILED: The operation in the specified account and Region failed. If the stack set operation fails in enough accounts within a Region, the failure tolerance for the stack set operation as a whole might be exceeded.

  • INOPERABLE: A DeleteStackInstances operation has failed and left the stack in an unstable state. Stacks in this state are excluded from further UpdateStackSet operations. You might need to perform a DeleteStackInstances operation, with RetainStacks set to true, to delete the stack instance, and then delete the stack manually.

  • PENDING: The operation in the specified account and Region has yet to start.

  • RUNNING: The operation in the specified account and Region is currently in progress.

  • SUCCEEDED: The operation in the specified account and Region completed successfully.

" + } + }, + "documentation":"

The detailed status of the stack instance.

" + }, + "StackInstanceDetailedStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLED", + "INOPERABLE" + ] + }, + "StackInstanceFilter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"StackInstanceFilterName", + "documentation":"

The type of filter to apply.

" + }, + "Values":{ + "shape":"StackInstanceFilterValues", + "documentation":"

The status to filter by.

" + } + }, + "documentation":"

The status that stack instances are filtered by.

" + }, + "StackInstanceFilterName":{ + "type":"string", + "enum":["DETAILED_STATUS"] + }, + "StackInstanceFilterValues":{ + "type":"string", + "max":10, + "min":6 + }, + "StackInstanceFilters":{ + "type":"list", + "member":{"shape":"StackInstanceFilter"}, + "max":1 }, "StackInstanceNotFoundException":{ "type":"structure", @@ -4067,11 +4129,11 @@ }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS region that the stack instance is associated with.

" + "documentation":"

The name of the AWS Region that the stack instance is associated with.

" }, "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" + "documentation":"

[Self-managed permissions] The name of the AWS account that the stack instance is associated with.

" }, "StackId":{ "shape":"StackId", @@ -4085,9 +4147,13 @@ "shape":"Reason", "documentation":"

The explanation for the specific status code assigned to this stack instance.

" }, + "StackInstanceStatus":{ + "shape":"StackInstanceComprehensiveStatus", + "documentation":"

The detailed status of the stack instance.

" + }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) ID that the stack instance is associated with.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" }, "DriftStatus":{ "shape":"StackDriftStatus", @@ -4445,10 +4511,10 @@ }, "OrganizationalUnitIds":{ "shape":"OrganizationalUnitIdList", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OUs) IDs to which stacks in your stack set have been deployed.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, - "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" + "documentation":"

A structure that contains information about a stack set. A stack set enables you to provision stacks into AWS accounts and across Regions by using a single CloudFormation template. In the stack set, you specify the template to use, as well as any parameters and capabilities that the template requires.

" }, "StackSetARN":{"type":"string"}, "StackSetDriftDetectionDetails":{ @@ -4554,7 +4620,7 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each region during stack create and update operations. If the number of failed stacks within a region exceeds the failure tolerance, the status of the operation in the region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations. The operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" + "documentation":"

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", @@ -4574,11 +4640,11 @@ }, "CreationTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

" + "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" + "documentation":"

The time at which the stack set operation ended, across all accounts and Regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or Region.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", @@ -4605,19 +4671,19 @@ "members":{ "RegionOrder":{ "shape":"RegionList", - "documentation":"

The order of the regions in where you want to perform the stack operation.

" + "documentation":"

The order of the Regions where you want to perform the stack operation.

" }, "FailureToleranceCount":{ "shape":"FailureToleranceCount", - "documentation":"

The number of accounts, per region, for which this operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

" + "documentation":"

The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

" }, "FailureTolerancePercentage":{ "shape":"FailureTolerancePercentage", - "documentation":"

The percentage of accounts, per region, for which this stack operation can fail before AWS CloudFormation stops the operation in that region. If the operation is stopped in a region, AWS CloudFormation doesn't attempt the operation in any subsequent regions.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

" + "documentation":"

The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. If the operation is stopped in a Region, AWS CloudFormation doesn't attempt the operation in any subsequent Regions.

When calculating the number of accounts based on the specified percentage, AWS CloudFormation rounds down to the next whole number.

Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

" }, "MaxConcurrentCount":{ "shape":"MaxConcurrentCount", - "documentation":"

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCountMaxConcurrentCount is at most one more than the FailureToleranceCount .

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" + "documentation":"

The maximum number of accounts in which to perform this operation at one time. This is dependent on the value of FailureToleranceCount. MaxConcurrentCount is at most one more than the FailureToleranceCount.

Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

" }, "MaxConcurrentPercentage":{ "shape":"MaxConcurrentPercentage", @@ -4645,15 +4711,15 @@ "members":{ "Account":{ "shape":"Account", - "documentation":"

[Self-managed permissions] The name of the AWS account for this operation result.

" + "documentation":"

[Self-managed permissions] The name of the AWS account for this operation result.

" }, "Region":{ "shape":"Region", - "documentation":"

The name of the AWS region for this operation result.

" + "documentation":"

The name of the AWS Region for this operation result.

" }, "Status":{ "shape":"StackSetOperationResultStatus", - "documentation":"

The result status of the stack set operation for the given account in the given region.

  • CANCELLED: The operation in the specified account and region has been cancelled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.

  • FAILED: The operation in the specified account and region failed.

    If the stack set operation fails in enough accounts within a region, the failure tolerance for the stack set operation as a whole might be exceeded.

  • RUNNING: The operation in the specified account and region is currently in progress.

  • PENDING: The operation in the specified account and region has yet to start.

  • SUCCEEDED: The operation in the specified account and region completed successfully.

" + "documentation":"

The result status of the stack set operation for the given account in the given Region.

  • CANCELLED: The operation in the specified account and Region has been cancelled. This is either because a user has stopped the stack set operation, or because the failure tolerance of the stack set operation has been exceeded.

  • FAILED: The operation in the specified account and Region failed.

    If the stack set operation fails in enough accounts within a Region, the failure tolerance for the stack set operation as a whole might be exceeded.

  • RUNNING: The operation in the specified account and Region is currently in progress.

  • PENDING: The operation in the specified account and Region has yet to start.

  • SUCCEEDED: The operation in the specified account and Region completed successfully.

" }, "StatusReason":{ "shape":"Reason", @@ -4665,10 +4731,10 @@ }, "OrganizationalUnitId":{ "shape":"OrganizationalUnitId", - "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) ID for this operation result.

" + "documentation":"

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, - "documentation":"

The structure that contains information about a specified operation's results for a given account in a given region.

" + "documentation":"

The structure that contains information about a specified operation's results for a given account in a given Region.

" }, "StackSetOperationStatus":{ "type":"string", @@ -4698,15 +4764,15 @@ }, "Status":{ "shape":"StackSetOperationStatus", - "documentation":"

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each region during stack create and update operations. If the number of failed stacks within a region exceeds the failure tolerance, the status of the operation in the region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations. The operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" + "documentation":"

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and AWS CloudFormation cancels the operation in any remaining Regions.

  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the AWS CloudFormation User Guide.

  • RUNNING: The operation is currently being performed.

  • STOPPED: The user has cancelled the operation.

  • STOPPING: The operation is in the process of stopping, at user request.

  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.

" }, "CreationTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

" + "documentation":"

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested Regions, before actually creating the first stacks.

" }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" + "documentation":"

The time at which the stack set operation ended, across all accounts and Regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or Region.

" } }, "documentation":"

The structures that contain summary information about the specified operation.

" @@ -5087,6 +5153,10 @@ "shape":"TypeVersionId", "documentation":"

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

" }, + "IsDefaultVersion":{ + "shape":"IsDefaultVersion", + "documentation":"

Whether the specified type version is set as the default version.

" + }, "Arn":{ "shape":"TypeArn", "documentation":"

The Amazon Resource Name (ARN) of the type version.

" @@ -5128,7 +5198,7 @@ }, "StackPolicyDuringUpdateURL":{ "shape":"StackPolicyDuringUpdateURL", - "documentation":"

Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

" + "documentation":"

Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

If you want to update protected resources, specify a temporary overriding stack policy during this update. If you do not specify a stack policy, the current policy that is associated with the stack will be used.

" }, "Parameters":{ "shape":"Parameters", @@ -5156,7 +5226,7 @@ }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

" + "documentation":"

Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you do not specify a stack policy, the current policy that is associated with the stack is unchanged.

" }, "NotificationARNs":{ "shape":"NotificationARNs", @@ -5186,7 +5256,7 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and regions.

You can specify Accounts or DeploymentTargets, but not both.

" + "documentation":"

[Self-managed permissions] The names of one or more AWS accounts for which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

You can specify Accounts or DeploymentTargets, but not both.

" }, "DeploymentTargets":{ "shape":"DeploymentTargets", @@ -5194,11 +5264,11 @@ }, "Regions":{ "shape":"RegionList", - "documentation":"

The names of one or more regions in which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and regions.

" + "documentation":"

The names of one or more Regions in which you want to update parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the specified accounts and Regions.

" }, "ParameterOverrides":{ "shape":"Parameters", - "documentation":"

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance update operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameter back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" + "documentation":"

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Regions. When specifying parameters and their values, be aware of how AWS CloudFormation sets parameter values during stack instance update operations:

  • To override the current value for a parameter, include the parameter and specify its value.

  • To leave a parameter set to its present value, you can do one of the following:

    • Do not include the parameter in the list.

    • Include the parameter and specify UsePreviousValue as true. (You cannot specify both a value and set UsePreviousValue to true.)

  • To set all overridden parameters back to the values specified in the stack set, specify a parameter list but do not include any parameters.

  • To leave all parameters set to their present values, do not specify this property at all.

During stack set updates, any parameter values overridden for a stack instance are not updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

" }, "OperationPreferences":{ "shape":"StackSetOperationPreferences", @@ -5297,11 +5367,11 @@ }, "Accounts":{ "shape":"AccountList", - "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts, you must also specify the Regions in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" }, "Regions":{ "shape":"RegionList", - "documentation":"

The regions in which to update associated stack instances. If you specify regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.

" + "documentation":"

The Regions in which to update associated stack instances. If you specify Regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.

" } } }, diff --git a/services/cloudformation/src/main/resources/codegen-resources/waiters-2.json b/services/cloudformation/src/main/resources/codegen-resources/waiters-2.json index a8d91064666e..73e1bf30b86a 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/waiters-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/waiters-2.json @@ -208,6 +208,43 @@ } ] }, + "StackRollbackComplete": { + "delay": 30, + "operation": "DescribeStacks", + "maxAttempts": 120, + "description": "Wait until stack status is UPDATE_ROLLBACK_COMPLETE.", + "acceptors": [ + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_COMPLETE", + "matcher": "pathAll", + "state": "success" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "UPDATE_ROLLBACK_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "argument": "Stacks[].StackStatus", + "expected": "DELETE_FAILED", + "matcher": "pathAny", + "state": "failure" + }, + { + "expected": "ValidationError", + "matcher": "error", + "state": "failure" + } + ] + }, "ChangeSetCreateComplete": { "delay": 30, "operation": "DescribeChangeSet", diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 7efcbeeea314..313133624556 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfront/src/main/resources/codegen-resources/service-2.json b/services/cloudfront/src/main/resources/codegen-resources/service-2.json index f93309e84e1d..8da11801ed32 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudfront/src/main/resources/codegen-resources/service-2.json @@ -1018,7 +1018,7 @@ }, "TargetOriginId":{ 
"shape":"string", - "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when they match this cache behavior.

" }, "ForwardedValues":{ "shape":"ForwardedValues", @@ -1026,11 +1026,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", - "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

  • allow-all: Viewers can use HTTP or HTTPS.

  • redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.

  • https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

  • allow-all: Viewers can use HTTP or HTTPS.

  • redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.

  • https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).

For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects’ cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing Cache Expiration in the Amazon CloudFront Developer Guide.

" }, "MinTTL":{ "shape":"long", @@ -1059,10 +1059,10 @@ }, "FieldLevelEncryptionId":{ "shape":"string", - "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for this cache behavior.

" } }, - "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to distribute objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current limit on the number of cache behaviors that you can add to a distribution, see Amazon CloudFront Limits in the AWS General Reference.

If you don't want to specify any cache behaviors, include only an empty CacheBehaviors element. Don't include an empty CacheBehavior element, or CloudFront returns a MalformedXML error.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behaviors in the Amazon CloudFront Developer Guide.

" + "documentation":"

A complex type that describes how CloudFront processes requests.

You must create at least as many cache behaviors (including the default cache behavior) as you have origins if you want CloudFront to serve objects from all of the origins. Each cache behavior specifies the one origin from which you want CloudFront to get objects. If you have two origins and only the default cache behavior, the default cache behavior will cause CloudFront to get objects from one of the origins, but the other origin is never used.

For the current quota (formerly known as limit) on the number of cache behaviors that you can add to a distribution, see Quotas in the Amazon CloudFront Developer Guide.

If you don’t want to specify any cache behaviors, include only an empty CacheBehaviors element. Don’t include an empty CacheBehavior element because this is invalid.

To delete all cache behaviors in an existing distribution, update the distribution configuration and include only an empty CacheBehaviors element.

To add, change, or remove one or more cache behaviors, update the distribution configuration and specify all of the cache behaviors that you want to include in the updated distribution.

For more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" }, "CacheBehaviorList":{ "type":"list", @@ -1368,7 +1368,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new origin access identity just created. For example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A.

", + "documentation":"

The fully qualified URI of the new origin access identity just created.

", "location":"header", "locationName":"Location" }, @@ -1405,7 +1405,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.

", + "documentation":"

The fully qualified URI of the new distribution resource just created.

", "location":"header", "locationName":"Location" }, @@ -1442,7 +1442,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5.

", + "documentation":"

The fully qualified URI of the new distribution resource just created.

", "location":"header", "locationName":"Location" }, @@ -1478,7 +1478,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new configuration resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-config/EDFDVBD632BHDS5.

", + "documentation":"

The fully qualified URI of the new configuration resource just created.

", "location":"header", "locationName":"Location" }, @@ -1513,7 +1513,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new profile resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-profile/EDFDVBD632BHDS5.

", + "documentation":"

The fully qualified URI of the new profile resource just created.

", "location":"header", "locationName":"Location" }, @@ -1588,7 +1588,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new public key resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/cloudfront-public-key/EDFDVBD632BHDS5.

", + "documentation":"

The fully qualified URI of the new public key resource just created.

", "location":"header", "locationName":"Location" }, @@ -1624,7 +1624,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.

", + "documentation":"

The fully qualified URI of the new streaming distribution resource just created.

", "location":"header", "locationName":"Location" }, @@ -1661,7 +1661,7 @@ }, "Location":{ "shape":"string", - "documentation":"

The fully qualified URI of the new streaming distribution resource just created. For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8.

", + "documentation":"

The fully qualified URI of the new streaming distribution resource just created.

", "location":"header", "locationName":"Location" }, @@ -1745,30 +1745,30 @@ "members":{ "HTTPPort":{ "shape":"integer", - "documentation":"

The HTTP port the custom origin listens on.

" + "documentation":"

The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.

" }, "HTTPSPort":{ "shape":"integer", - "documentation":"

The HTTPS port the custom origin listens on.

" + "documentation":"

The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.

" }, "OriginProtocolPolicy":{ "shape":"OriginProtocolPolicy", - "documentation":"

The origin protocol policy to apply to your origin.

" + "documentation":"

Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid values are:

  • http-only – CloudFront always uses HTTP to connect to the origin.

  • match-viewer – CloudFront connects to the origin using the same protocol that the viewer used to connect to CloudFront.

  • https-only – CloudFront always uses HTTPS to connect to the origin.

" }, "OriginSslProtocols":{ "shape":"OriginSslProtocols", - "documentation":"

The SSL/TLS protocols that you want CloudFront to use when communicating with your origin over HTTPS.

" + "documentation":"

Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin over HTTPS. Valid values include SSLv3, TLSv1, TLSv1.1, and TLSv1.2.

For more information, see Minimum Origin SSL Protocol in the Amazon CloudFront Developer Guide.

" }, "OriginReadTimeout":{ "shape":"integer", - "documentation":"

You can create a custom origin read timeout. All timeout units are in seconds. The default origin read timeout is 30 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 4 seconds; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + "documentation":"

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the origin response timeout. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don’t specify otherwise) is 30 seconds.

For more information, see Origin Response Timeout in the Amazon CloudFront Developer Guide.

" }, "OriginKeepaliveTimeout":{ "shape":"integer", - "documentation":"

You can create a custom keep-alive timeout. All timeout units are in seconds. The default keep-alive timeout is 5 seconds, but you can configure custom timeout lengths using the CloudFront API. The minimum timeout length is 1 second; the maximum is 60 seconds.

If you need to increase the maximum time limit, contact the AWS Support Center.

" + "documentation":"

Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don’t specify otherwise) is 5 seconds.

For more information, see Origin Keep-alive Timeout in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A custom origin or an Amazon S3 bucket configured as a website endpoint.

" + "documentation":"

A custom origin. A custom origin is any origin that is not an Amazon S3 bucket, with one exception. An Amazon S3 bucket that is configured with static website hosting is a custom origin.

" }, "DefaultCacheBehavior":{ "type":"structure", @@ -1782,7 +1782,7 @@ "members":{ "TargetOriginId":{ "shape":"string", - "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when a request matches the path pattern either for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the origin that you want CloudFront to route requests to when they use the default cache behavior.

" }, "ForwardedValues":{ "shape":"ForwardedValues", @@ -1790,11 +1790,11 @@ }, "TrustedSigners":{ "shape":"TrustedSigners", - "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

If you don't want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it's currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" + "documentation":"

A complex type that specifies the AWS accounts, if any, that you want to allow to create signed URLs for private content.

If you want to require signed URLs in requests for objects in the target origin that match the PathPattern for this cache behavior, specify true for Enabled, and specify the applicable values for Quantity and Items. For more information, see Serving Private Content with Signed URLs and Signed Cookies in the Amazon CloudFront Developer Guide.

If you don’t want to require signed URLs in requests for objects that match PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.

To add, change, or remove one or more trusted signers, change Enabled to true (if it’s currently false), change Quantity as applicable, and specify all of the trusted signers that you want to include in the updated distribution.

" }, "ViewerProtocolPolicy":{ "shape":"ViewerProtocolPolicy", - "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

  • allow-all: Viewers can use HTTP or HTTPS.

  • redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.

  • https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).

For more information about requiring the HTTPS protocol, see Using an HTTPS Connection to Access Your Objects in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects' cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the Amazon CloudFront Developer Guide.

" + "documentation":"

The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern. You can specify the following options:

  • allow-all: Viewers can use HTTP or HTTPS.

  • redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS URL. The viewer then resubmits the request using the new URL.

  • https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 (Forbidden).

For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

The only way to guarantee that viewers retrieve an object that was fetched from the origin using HTTPS is never to use any other protocol to fetch the object. If you have recently changed from HTTP to HTTPS, we recommend that you clear your objects’ cache because cached objects are protocol agnostic. That means that an edge location will return an object from the cache regardless of whether the current request protocol matches the protocol used previously. For more information, see Managing Cache Expiration in the Amazon CloudFront Developer Guide.

" }, "MinTTL":{ "shape":"long", @@ -1823,10 +1823,10 @@ }, "FieldLevelEncryptionId":{ "shape":"string", - "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for a cache behavior or for the default cache behavior in your distribution.

" + "documentation":"

The value of ID for the field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data for the default cache behavior.

" } }, - "documentation":"

A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" + "documentation":"

A complex type that describes the default cache behavior if you don’t specify a CacheBehavior element or if request URLs don’t match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior.

" }, "DeleteCloudFrontOriginAccessIdentityRequest":{ "type":"structure", @@ -3112,7 +3112,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

The argument is invalid.

", + "documentation":"

An argument is invalid.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3827,7 +3827,8 @@ "TLSv1", "TLSv1_2016", "TLSv1.1_2016", - "TLSv1.2_2018" + "TLSv1.2_2018", + "TLSv1.2_2019" ] }, "MissingBody":{ @@ -3929,30 +3930,38 @@ "members":{ "Id":{ "shape":"string", - "documentation":"

A unique identifier for the origin or origin group. The value of Id must be unique within the distribution.

When you specify the value of TargetOriginId for the default cache behavior or for another cache behavior, you indicate the origin to which you want the cache behavior to route requests by specifying the value of the Id element for that origin. When a request matches the path pattern for that cache behavior, CloudFront routes the request to the specified origin. For more information, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.

" + "documentation":"

A unique identifier for the origin. This value must be unique within the distribution.

Use this value to specify the TargetOriginId in a CacheBehavior or DefaultCacheBehavior.

" }, "DomainName":{ "shape":"string", - "documentation":"

Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. If you set up your bucket to be configured as a website endpoint, enter the Amazon S3 static website hosting endpoint for the bucket.

For more information about specifying this value for different types of origins, see Origin Domain Name in the Amazon CloudFront Developer Guide.

Constraints for Amazon S3 origins:

  • If you configured Amazon S3 Transfer Acceleration for your bucket, don't specify the s3-accelerate endpoint for DomainName.

  • The bucket name must be between 3 and 63 characters long (inclusive).

  • The bucket name must contain only lowercase characters, numbers, periods, underscores, and dashes.

  • The bucket name must not contain adjacent periods.

Custom Origins: The DNS domain name for the HTTP server from which you want CloudFront to get objects for this origin, for example, www.example.com.

Constraints for custom origins:

  • DomainName must be a valid DNS name that contains only a-z, A-Z, 0-9, dot (.), hyphen (-), or underscore (_) characters.

  • The name cannot exceed 128 characters.

" + "documentation":"

The domain name for the origin.

For more information, see Origin Domain Name in the Amazon CloudFront Developer Guide.

" }, "OriginPath":{ "shape":"string", - "documentation":"

An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin. When you include the OriginPath element, specify the directory name, beginning with a /. CloudFront appends the directory name to the value of DomainName, for example, example.com/production. Do not include a / at the end of the directory name.

For example, suppose you've specified the following values for your distribution:

  • DomainName: An Amazon S3 bucket named myawsbucket.

  • OriginPath: /production

  • CNAME: example.com

When a user enters example.com/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/index.html.

When a user enters example.com/acme/index.html in a browser, CloudFront sends a request to Amazon S3 for myawsbucket/production/acme/index.html.

" + "documentation":"

An optional path that CloudFront appends to the origin domain name when CloudFront requests content from the origin.

For more information, see Origin Path in the Amazon CloudFront Developer Guide.

" }, "CustomHeaders":{ "shape":"CustomHeaders", - "documentation":"

A complex type that contains names and values for the custom headers that you want.

" + "documentation":"

A list of HTTP header names and values that CloudFront adds to requests it sends to the origin.

For more information, see Adding Custom Headers to Origin Requests in the Amazon CloudFront Developer Guide.

" }, "S3OriginConfig":{ "shape":"S3OriginConfig", - "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + "documentation":"

Use this type to specify an origin that is an Amazon S3 bucket that is not configured with static website hosting. To specify any other type of origin, including an Amazon S3 bucket that is configured with static website hosting, use the CustomOriginConfig type instead.

" }, "CustomOriginConfig":{ "shape":"CustomOriginConfig", - "documentation":"

A complex type that contains information about a custom origin. If the origin is an Amazon S3 bucket, use the S3OriginConfig element instead.

" + "documentation":"

Use this type to specify an origin that is a content container or HTTP server, including an Amazon S3 bucket that is configured with static website hosting. To specify an Amazon S3 bucket that is not configured with static website hosting, use the S3OriginConfig type instead.

" + }, + "ConnectionAttempts":{ + "shape":"integer", + "documentation":"

The number of times that CloudFront attempts to connect to the origin. The minimum number is 1, the maximum is 3, and the default (if you don’t specify otherwise) is 3.

For a custom origin (including an Amazon S3 bucket that’s configured with static website hosting), this value also specifies the number of times that CloudFront attempts to get a response from the origin, in the case of an Origin Response Timeout.

For more information, see Origin Connection Attempts in the Amazon CloudFront Developer Guide.

" + }, + "ConnectionTimeout":{ + "shape":"integer", + "documentation":"

The number of seconds that CloudFront waits when trying to establish a connection to the origin. The minimum timeout is 1 second, the maximum is 10 seconds, and the default (if you don’t specify otherwise) is 10 seconds.

For more information, see Origin Connection Timeout in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files. This can also be an origin group, if you've created an origin group. You must specify at least one origin or origin group.

For the current limit on the number of origins or origin groups that you can specify for a distribution, see Amazon CloudFront Limits in the AWS General Reference.

" + "documentation":"

An origin.

An origin is the location where content is stored, and from which CloudFront gets content to serve to viewers. To specify an origin:

  • Use the S3OriginConfig type to specify an Amazon S3 bucket that is not configured with static website hosting.

  • Use the CustomOriginConfig type to specify various other kinds of content containers or HTTP servers, including:

    • An Amazon S3 bucket that is configured with static website hosting

    • An Elastic Load Balancing load balancer

    • An AWS Elemental MediaPackage origin

    • An AWS Elemental MediaStore container

    • Any other HTTP server, running on an Amazon EC2 instance or any other kind of host

For the current maximum number of origins that you can specify per distribution, see General Quotas on Web Distributions in the Amazon CloudFront Developer Guide (quotas were formerly referred to as limits).

" }, "OriginCustomHeader":{ "type":"structure", @@ -4154,7 +4163,7 @@ "members":{ "Message":{"shape":"string"} }, - "documentation":"

The precondition given in one or more of the request-header fields evaluated to false.

", + "documentation":"

The precondition given in one or more of the request header fields evaluated to false.

", "error":{"httpStatusCode":412}, "exception":true }, @@ -4427,7 +4436,7 @@ "documentation":"

The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront. The format of the value is:

origin-access-identity/cloudfront/ID-of-origin-access-identity

where ID-of-origin-access-identity is the value that CloudFront returned in the ID element when you created the origin access identity.

If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty OriginAccessIdentity element.

To delete the origin access identity from an existing distribution, update the distribution configuration and include an empty OriginAccessIdentity element.

To replace the origin access identity, update the distribution configuration and specify the new origin access identity.

For more information about the origin access identity, see Serving Private Content through CloudFront in the Amazon CloudFront Developer Guide.

" } }, - "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin, use the CustomOriginConfig element instead.

" + "documentation":"

A complex type that contains information about the Amazon S3 origin. If the origin is a custom origin or an S3 bucket that is configured as a website endpoint, use the CustomOriginConfig element instead.

" }, "SSLSupportMethod":{ "type":"string", @@ -5403,11 +5412,11 @@ }, "SSLSupportMethod":{ "shape":"SSLSupportMethod", - "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify which viewers the distribution accepts HTTPS connections from.

  • sni-only – The distribution accepts HTTPS connections from only viewers that support server name indication (SNI). This is recommended. Most browsers and clients released after 2010 support SNI.

  • vip – The distribution accepts HTTPS connections from all viewers including those that don’t support SNI. This is not recommended, and results in additional monthly charges from CloudFront.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net, don’t set a value for this field.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify which viewers the distribution accepts HTTPS connections from.

  • sni-only – The distribution accepts HTTPS connections from only viewers that support server name indication (SNI). This is recommended. Most browsers and clients support SNI.

  • vip – The distribution accepts HTTPS connections from all viewers including those that don’t support SNI. This is not recommended, and results in additional monthly charges from CloudFront.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net, don’t set a value for this field.

" }, "MinimumProtocolVersion":{ "shape":"MinimumProtocolVersion", - "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify the security policy that you want CloudFront to use for HTTPS connections with viewers. The security policy determines two settings:

  • The minimum SSL/TLS protocol that CloudFront can use to communicate with viewers.

  • The ciphers that CloudFront can use to encrypt the content that it returns to viewers.

For more information, see Security Policy and Supported Protocols and Ciphers Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

On the CloudFront console, this setting is called Security Policy.

We recommend that you specify TLSv1.2_2018 unless your viewers are using browsers or devices that don’t support TLSv1.2.

When you’re using SNI only (you set SSLSupportMethod to sni-only), you must specify TLSv1 or higher.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net (you set CloudFrontDefaultCertificate to true), CloudFront automatically sets the security policy to TLSv1 regardless of the value that you set here.

" + "documentation":"

If the distribution uses Aliases (alternate domain names or CNAMEs), specify the security policy that you want CloudFront to use for HTTPS connections with viewers. The security policy determines two settings:

  • The minimum SSL/TLS protocol that CloudFront can use to communicate with viewers.

  • The ciphers that CloudFront can use to encrypt the content that it returns to viewers.

For more information, see Security Policy and Supported Protocols and Ciphers Between Viewers and CloudFront in the Amazon CloudFront Developer Guide.

On the CloudFront console, this setting is called Security Policy.

When you’re using SNI only (you set SSLSupportMethod to sni-only), you must specify TLSv1 or higher.

If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net (you set CloudFrontDefaultCertificate to true), CloudFront automatically sets the security policy to TLSv1 regardless of the value that you set here.

" }, "Certificate":{ "shape":"string", @@ -5420,7 +5429,7 @@ "deprecated":true } }, - "documentation":"

A complex type that determines the distribution’s SSL/TLS configuration for communicating with viewers.

If the distribution doesn’t use Aliases (also known as alternate domain names or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net—set CloudFrontDefaultCertificate to true and leave all other fields empty.

If the distribution uses Aliases (alternate domain names or CNAMEs), use the fields in this type to specify the following settings:

  • Which viewers the distribution accepts HTTPS connections from: only viewers that support server name indication (SNI) (recommended), or all viewers including those that don’t support SNI.

    • To accept HTTPS connections from only viewers that support SNI, set SSLSupportMethod to sni-only. This is recommended. Most browsers and clients released after 2010 support SNI.

    • To accept HTTPS connections from all viewers, including those that don’t support SNI, set SSLSupportMethod to vip. This is not recommended, and results in additional monthly charges from CloudFront.

  • The minimum SSL/TLS protocol version that the distribution can use to communicate with viewers. To specify a minimum version, choose a value for MinimumProtocolVersion. For more information, see Security Policy in the Amazon CloudFront Developer Guide.

  • The location of the SSL/TLS certificate, AWS Certificate Manager (ACM) (recommended) or AWS Identity and Access Management (AWS IAM). You specify the location by setting a value in one of the following fields (not both):

    • ACMCertificateArn

    • IAMCertificateId

All distributions support HTTPS connections from viewers. To require viewers to use HTTPS only, or to redirect them from HTTP to HTTPS, use ViewerProtocolPolicy in the CacheBehavior or DefaultCacheBehavior. To specify how CloudFront should use SSL/TLS to communicate with your custom origin, use CustomOriginConfig.

For more information, see Using HTTPS with CloudFront and Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.

" + "documentation":"

A complex type that determines the distribution’s SSL/TLS configuration for communicating with viewers.

If the distribution doesn’t use Aliases (also known as alternate domain names or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net—set CloudFrontDefaultCertificate to true and leave all other fields empty.

If the distribution uses Aliases (alternate domain names or CNAMEs), use the fields in this type to specify the following settings:

  • Which viewers the distribution accepts HTTPS connections from: only viewers that support server name indication (SNI) (recommended), or all viewers including those that don’t support SNI.

    • To accept HTTPS connections from only viewers that support SNI, set SSLSupportMethod to sni-only. This is recommended. Most browsers and clients support SNI.

    • To accept HTTPS connections from all viewers, including those that don’t support SNI, set SSLSupportMethod to vip. This is not recommended, and results in additional monthly charges from CloudFront.

  • The minimum SSL/TLS protocol version that the distribution can use to communicate with viewers. To specify a minimum version, choose a value for MinimumProtocolVersion. For more information, see Security Policy in the Amazon CloudFront Developer Guide.

  • The location of the SSL/TLS certificate, AWS Certificate Manager (ACM) (recommended) or AWS Identity and Access Management (AWS IAM). You specify the location by setting a value in one of the following fields (not both):

    • ACMCertificateArn

    • IAMCertificateId

All distributions support HTTPS connections from viewers. To require viewers to use HTTPS only, or to redirect them from HTTP to HTTPS, use ViewerProtocolPolicy in the CacheBehavior or DefaultCacheBehavior. To specify how CloudFront should use SSL/TLS to communicate with your custom origin, use CustomOriginConfig.

For more information, see Using HTTPS with CloudFront and Using Alternate Domain Names and HTTPS in the Amazon CloudFront Developer Guide.

" }, "ViewerProtocolPolicy":{ "type":"string", diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 9744e145b871..e5c4607ba25c 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 36aa009b901f..9d86b5e9d4c8 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json b/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json index 51c2a78b733b..00344f7caacc 100644 --- a/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudhsmv2/src/main/resources/codegen-resources/service-2.json @@ -270,7 +270,7 @@ }, "SourceRegion":{ "shape":"Region", - "documentation":"

The AWS region that contains the source backup from which the new backup was copied.

" + "documentation":"

The AWS Region that contains the source backup from which the new backup was copied.

" }, "SourceBackup":{ "shape":"BackupId", @@ -278,13 +278,16 @@ }, "SourceCluster":{ "shape":"ClusterId", - "documentation":"

The identifier (ID) of the cluster containing the source backup from which the new backup was copied. .

" + "documentation":"

The identifier (ID) of the cluster containing the source backup from which the new backup was copied.

" }, "DeleteTimestamp":{ "shape":"Timestamp", "documentation":"

The date and time when the backup will be permanently deleted.

" }, - "TagList":{"shape":"TagList"} + "TagList":{ + "shape":"TagList", + "documentation":"

The list of tags for the backup.

" + } }, "documentation":"

Contains information about a backup of an AWS CloudHSM cluster. All backup objects contain the BackupId, BackupState, ClusterId, and CreateTimestamp parameters. Backups that were copied into a destination region additionally contain the CopyTimestamp, SourceBackup, SourceCluster, and SourceRegion parameters. A backup that is pending deletion will include the DeleteTimestamp parameter.

" }, @@ -387,6 +390,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, + "documentation":"

The request was rejected because of a tagging failure. Verify the tag conditions in all applicable policies, and then retry the request.

", "exception":true }, "Cluster":{ @@ -444,7 +448,10 @@ "shape":"Certificates", "documentation":"

Contains one or more certificates or a certificate signing request (CSR).

" }, - "TagList":{"shape":"TagList"} + "TagList":{ + "shape":"TagList", + "documentation":"

The list of tags for the cluster.

" + } }, "documentation":"

Contains information about an AWS CloudHSM cluster.

" }, @@ -485,7 +492,10 @@ "shape":"BackupId", "documentation":"

The ID of the backup that will be copied to the destination region.

" }, - "TagList":{"shape":"TagList"} + "TagList":{ + "shape":"TagList", + "documentation":"

Tags to apply to the destination backup during creation. If you specify tags, only these tags will be applied to the destination backup. If you do not specify tags, the service copies tags from the source backup to the destination backup.

" + } } }, "CopyBackupToRegionResponse":{ @@ -516,7 +526,10 @@ "shape":"BackupId", "documentation":"

The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups.

" }, - "TagList":{"shape":"TagList"} + "TagList":{ + "shape":"TagList", + "documentation":"

Tags to apply to the CloudHSM cluster during creation.

" + } } }, "CreateClusterResponse":{ diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 0940d2017b90..31b3e8e07747 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 6df81b7a0b01..59f246e5d192 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 8baff580330a..70a4fbd776d6 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 26a07f9f5713..7f1fecc61e5d 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index 913f76e89c4e..db4eb5670cea 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -344,7 +344,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Displays the tags associated with a CloudWatch resource. Alarms support tagging.

" + "documentation":"

Displays the tags associated with a CloudWatch resource. Currently, alarms and Contributor Insights rules support tagging.

" }, "PutAnomalyDetector":{ "name":"PutAnomalyDetector", @@ -469,7 +469,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

" + "documentation":"

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.

" }, "UntagResource":{ "name":"UntagResource", @@ -594,7 +594,7 @@ "documentation":"

The metric dimensions associated with the anomaly detection model.

" }, "Stat":{ - "shape":"Stat", + "shape":"AnomalyDetectorMetricStat", "documentation":"

The statistic associated with the anomaly detection model.

" }, "Configuration":{ @@ -626,7 +626,15 @@ "type":"list", "member":{"shape":"Range"} }, - "AnomalyDetectorMetricTimezone":{"type":"string"}, + "AnomalyDetectorMetricStat":{ + "type":"string", + "pattern":"(SampleCount|Average|Sum|Minimum|Maximum|p(\\d{1,2}|100)(\\.\\d{0,2})?|[ou]\\d+(\\.\\d*)?)(_E|_L|_H)?" + }, + "AnomalyDetectorMetricTimezone":{ + "type":"string", + "max":50, + "pattern":".*" + }, "AnomalyDetectorStateValue":{ "type":"string", "enum":[ @@ -927,7 +935,7 @@ "documentation":"

The metric dimensions associated with the anomaly detection model to delete.

" }, "Stat":{ - "shape":"Stat", + "shape":"AnomalyDetectorMetricStat", "documentation":"

The statistic associated with the anomaly detection model to delete.

" } } @@ -1945,7 +1953,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the CloudWatch resource that you want to view tags for. For more information on ARN format, see Example ARNs in the Amazon Web Services General Reference.

" + "documentation":"

The ARN of the CloudWatch resource that you want to view tags for.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" } } }, @@ -2395,7 +2403,7 @@ "documentation":"

The metric dimensions to create the anomaly detection model for.

" }, "Stat":{ - "shape":"Stat", + "shape":"AnomalyDetectorMetricStat", "documentation":"

The statistic to use for the metric and the anomaly detection model.

" }, "Configuration":{ @@ -2494,6 +2502,10 @@ "RuleDefinition":{ "shape":"InsightRuleDefinition", "documentation":"

The definition of the rule, as a JSON object. For details on the valid syntax, see Contributor Insights Rule Syntax.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs to associate with the Contributor Insights rule. You can associate as many as 50 tags with a rule.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

To be able to associate tags with a rule, you must have the cloudwatch:TagResource permission in addition to the cloudwatch:PutInsightRule permission.

If you are using this operation to update an existing Contributor Insights rule, any tags you specify in this parameter are ignored. To change the tags of an existing rule, use TagResource.

" } } }, @@ -2863,7 +2875,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the CloudWatch alarm that you're adding tags to. The ARN format is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

" + "documentation":"

The ARN of the CloudWatch resource that you're adding tags to.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" }, "Tags":{ "shape":"TagList", @@ -2901,7 +2913,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The ARN of the CloudWatch resource that you're removing tags from. For more information on ARN format, see Example ARNs in the Amazon Web Services General Reference.

" + "documentation":"

The ARN of the CloudWatch resource that you're removing tags from.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" }, "TagKeys":{ "shape":"TagKeyList", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 91df00f98339..bba3e676e9ea 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json index 6b4e3a35f7b2..aae7ebd3dceb 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/service-2.json @@ -23,7 +23,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidStateException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

" }, @@ -41,7 +42,8 @@ {"shape":"InvalidStateException"}, {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your custom applications and services, or it can be a partner event bus which can be matched to a partner event source.

" }, @@ -57,7 +59,8 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Called by a SaaS partner to create a partner event source. This operation is not used by AWS customers.

Each partner event source can be used by one AWS account to create a matching partner event bus in that AWS account. A SaaS partner must create one partner event source for each AWS account that wants to receive those event types.

A partner event source creates events based on resources within the SaaS partner's service or application.

An AWS account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using AWS Events rules and targets.

Partner event source names follow this format:

partner_name/event_namespace/event_name

partner_name is determined during partner registration and identifies the partner to AWS customers. event_namespace is determined by the partner and is a way for the partner to categorize their events. event_name is determined by the partner, and should uniquely identify an event-generating resource within the partner system. The combination of event_namespace and event_name should help AWS customers decide whether to create an event bus to receive these events.

" }, @@ -72,7 +75,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidStateException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, @@ -98,7 +102,8 @@ "input":{"shape":"DeletePartnerEventSourceRequest"}, "errors":[ {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This operation is used by SaaS partners to delete a partner event source. This operation is not used by AWS customers.

When you delete an event source, the status of the corresponding partner event bus in the AWS customer account becomes DELETED.

" }, @@ -141,7 +146,8 @@ "output":{"shape":"DescribeEventSourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This operation lists details about a partner event source that is shared with your account.

" }, @@ -155,7 +161,8 @@ "output":{"shape":"DescribePartnerEventSourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to list details about a partner event source that they have created. AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" }, @@ -225,7 +232,8 @@ "input":{"shape":"ListEventSourcesRequest"}, "output":{"shape":"ListEventSourcesResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

You can use this to see all the partner event sources that have been shared with your AWS account. For more information about partner event sources, see CreateEventBus.

" }, @@ -239,7 +247,8 @@ "output":{"shape":"ListPartnerEventSourceAccountsResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to display the AWS account ID that a particular partner event source name is associated with. This operation is not used by AWS customers.

" }, @@ -252,7 +261,8 @@ "input":{"shape":"ListPartnerEventSourcesRequest"}, "output":{"shape":"ListPartnerEventSourcesResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by AWS customers.

" }, @@ -334,7 +344,8 @@ "input":{"shape":"PutPartnerEventsRequest"}, "output":{"shape":"PutPartnerEventsResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This is used by SaaS partners to write events to a customer's partner event bus. AWS customers do not use this operation.

" }, @@ -351,7 +362,7 @@ {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. CloudWatch Events rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" + "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" }, "PutRule":{ "name":"PutRule", @@ -386,7 +397,7 @@ {"shape":"ManagedRuleException"}, {"shape":"InternalException"} ], - "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

  • Amazon API Gateway REST APIs

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines, and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -462,7 +473,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ManagedRuleException"} ], - "documentation":"

Removes one or more tags from the specified EventBridge resource. In CloudWatch Events, rules and event buses can be tagged.

" + "documentation":"

Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged.

" } }, "shapes":{ @@ -998,6 +1009,39 @@ ] }, "EventTime":{"type":"timestamp"}, + "HeaderKey":{ + "type":"string", + "max":512, + "pattern":"^[!#$%&'*+-.^_`|~0-9a-zA-Z]+$" + }, + "HeaderParametersMap":{ + "type":"map", + "key":{"shape":"HeaderKey"}, + "value":{"shape":"HeaderValue"} + }, + "HeaderValue":{ + "type":"string", + "max":512, + "pattern":"^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$" + }, + "HttpParameters":{ + "type":"structure", + "members":{ + "PathParameterValues":{ + "shape":"PathParameterList", + "documentation":"

The path parameter values to be used to populate API Gateway REST API path wildcards (\"*\").

" + }, + "HeaderParameters":{ + "shape":"HeaderParametersMap", + "documentation":"

The headers that need to be sent as part of the request invoking the API Gateway REST API.

" + }, + "QueryStringParameters":{ + "shape":"QueryStringParametersMap", + "documentation":"

The query string keys/values that need to be sent as part of the request invoking the API Gateway REST API.

" + } + }, + "documentation":"

These are custom parameters to be used when the target is an API Gateway REST API.

" + }, "InputTransformer":{ "type":"structure", "required":["InputTemplate"], @@ -1355,6 +1399,13 @@ "min":1, "pattern":"[\\.\\-_A-Za-z0-9]+" }, + "OperationDisabledException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The operation you are attempting is not available in this region.

", + "exception":true + }, "PartnerEventSource":{ "type":"structure", "members":{ @@ -1405,6 +1456,14 @@ "min":1, "pattern":"aws\\.partner/[\\.\\-_A-Za-z0-9]+/[/\\.\\-_A-Za-z0-9]*" }, + "PathParameter":{ + "type":"string", + "pattern":"^(?!\\s*$).+" + }, + "PathParameterList":{ + "type":"list", + "member":{"shape":"PathParameter"} + }, "PolicyLengthExceededException":{ "type":"structure", "members":{ @@ -1709,6 +1768,21 @@ "type":"list", "member":{"shape":"PutTargetsResultEntry"} }, + "QueryStringKey":{ + "type":"string", + "max":512, + "pattern":"[^\\x00-\\x1F\\x7F]+" + }, + "QueryStringParametersMap":{ + "type":"map", + "key":{"shape":"QueryStringKey"}, + "value":{"shape":"QueryStringValue"} + }, + "QueryStringValue":{ + "type":"string", + "max":512, + "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+" + }, "RemovePermissionRequest":{ "type":"structure", "required":["StatementId"], @@ -2059,6 +2133,10 @@ "SqsParameters":{ "shape":"SqsParameters", "documentation":"

Contains the message group ID to use when the target is a FIFO queue.

If you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled.

" + }, + "HttpParameters":{ + "shape":"HttpParameters", + "documentation":"

Contains the HTTP parameters to use when the target is an API Gateway REST endpoint.

If you specify an API Gateway REST API as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of your target invoking request.

" } }, "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

" diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index f757f71b1e30..0df6a9744817 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json index 7d69f02ed878..4efa056f1aad 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json @@ -58,7 +58,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.

" + "documentation":"

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.

" }, "CreateLogGroup":{ "name":"CreateLogGroup", @@ -151,6 +151,20 @@ ], "documentation":"

Deletes the specified metric filter.

" }, + "DeleteQueryDefinition":{ + "name":"DeleteQueryDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteQueryDefinitionRequest"}, + "output":{"shape":"DeleteQueryDefinitionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", "http":{ @@ -282,6 +296,19 @@ ], "documentation":"

Returns a list of CloudWatch Logs Insights queries that are scheduled, executing, or have been executed recently in this account. You can request all queries, or limit it to queries of a specific log group or queries with a certain status.

" }, + "DescribeQueryDefinitions":{ + "name":"DescribeQueryDefinitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeQueryDefinitionsRequest"}, + "output":{"shape":"DescribeQueryDefinitionsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DescribeResourcePolicies":{ "name":"DescribeResourcePolicies", "http":{ @@ -370,7 +397,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of the fields that are included in log events in the specified log group, along with the percentage of log events that contain each field. The search is limited to a time period that you specify.

In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, @timestamp is the timestamp of each log event.

The response results are sorted by the frequency percentage, starting with the highest percentage.

" + "documentation":"

Returns a list of the fields that are included in log events in the specified log group, along with the percentage of log events that contain each field. The search is limited to a time period that you specify.

In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, @timestamp is the timestamp of each log event. For more information about the fields that are generated by CloudWatch logs, see Supported Logs and Discovered Fields.

The response results are sorted by the frequency percentage, starting with the highest percentage.

" }, "GetLogRecord":{ "name":"GetLogRecord", @@ -401,7 +428,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use .

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

" + "documentation":"

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

" }, "ListTagsLogGroup":{ "name":"ListTagsLogGroup", @@ -430,7 +457,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.

A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

" + "documentation":"

Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.

A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

" }, "PutDestinationPolicy":{ "name":"PutDestinationPolicy", @@ -478,7 +505,21 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents.

The maximum number of metric filters that can be associated with a log group is 100.

" + "documentation":"

Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents.

The maximum number of metric filters that can be associated with a log group is 100.

" + }, + "PutQueryDefinition":{ + "name":"PutQueryDefinition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutQueryDefinitionRequest"}, + "output":{"shape":"PutQueryDefinitionResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ] }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -524,7 +565,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates or updates a subscription filter and associates it with the specified log group. Subscription filters allow you to subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. Currently, the supported destinations are:

  • An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.

  • A logical destination that belongs to a different account, for cross-account delivery.

  • An Amazon Kinesis Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.

  • An AWS Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

There can only be one subscription filter associated with a log group. If you are updating an existing filter, you must specify the correct name in filterName. Otherwise, the call fails because you cannot associate a second filter with a log group.

" + "documentation":"

Creates or updates a subscription filter and associates it with the specified log group. Subscription filters allow you to subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. Currently, the supported destinations are:

  • An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.

  • A logical destination that belongs to a different account, for cross-account delivery.

  • An Amazon Kinesis Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.

  • An AWS Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

There can only be one subscription filter associated with a log group. If you are updating an existing filter, you must specify the correct name in filterName. Otherwise, the call fails because you cannot associate a second filter with a log group.

" }, "StartQuery":{ "name":"StartQuery", @@ -569,7 +610,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Adds or updates the specified tags for the specified log group.

To list the tags for a log group, use ListTagsLogGroup. To remove tags, use UntagLogGroup.

For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide.

" + "documentation":"

Adds or updates the specified tags for the specified log group.

To list the tags for a log group, use ListTagsLogGroup. To remove tags, use UntagLogGroup.

For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide.

" }, "TestMetricFilter":{ "name":"TestMetricFilter", @@ -595,7 +636,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the specified tags from the specified log group.

To list the tags for a log group, use ListTagsLogGroup. To add tags, use UntagLogGroup.

" + "documentation":"

Removes the specified tags from the specified log group.

To list the tags for a log group, use ListTagsLogGroup. To add tags, use TagLogGroup.

" } }, "shapes":{ @@ -781,6 +822,19 @@ } } }, + "DeleteQueryDefinitionRequest":{ + "type":"structure", + "required":["queryDefinitionId"], + "members":{ + "queryDefinitionId":{"shape":"QueryId"} + } + }, + "DeleteQueryDefinitionResponse":{ + "type":"structure", + "members":{ + "success":{"shape":"Success"} + } + }, "DeleteResourcePolicyRequest":{ "type":"structure", "members":{ @@ -1020,6 +1074,21 @@ "nextToken":{"shape":"NextToken"} } }, + "DescribeQueryDefinitionsRequest":{ + "type":"structure", + "members":{ + "queryDefinitionNamePrefix":{"shape":"QueryDefinitionName"}, + "maxResults":{"shape":"QueryListMaxResults"}, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeQueryDefinitionsResponse":{ + "type":"structure", + "members":{ + "queryDefinitions":{"shape":"QueryDefinitionList"}, + "nextToken":{"shape":"NextToken"} + } + }, "DescribeResourcePoliciesRequest":{ "type":"structure", "members":{ @@ -1689,7 +1758,7 @@ "members":{ "queryCompileError":{"shape":"QueryCompileError"} }, - "documentation":"

The query string is not valid. Details about this error are displayed in a QueryCompileError object. For more information, see .

For more information about valid query syntax, see CloudWatch Logs Insights Query Syntax.

", + "documentation":"

The query string is not valid. Details about this error are displayed in a QueryCompileError object. For more information, see QueryCompileError.

For more information about valid query syntax, see CloudWatch Logs Insights Query Syntax.

", "exception":true }, "Message":{"type":"string"}, @@ -1767,7 +1836,7 @@ }, "metricNamespace":{ "shape":"MetricNamespace", - "documentation":"

The namespace of the CloudWatch metric.

" + "documentation":"

A custom namespace to contain your metric in CloudWatch. Use namespaces to group together metrics that are similar. For more information, see Namespaces.

" }, "metricValue":{ "shape":"MetricValue", @@ -1913,7 +1982,7 @@ }, "sequenceToken":{ "shape":"SequenceToken", - "documentation":"

The sequence token obtained from the response of the previous PutLogEvents call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

" + "documentation":"

The sequence token obtained from the response of the previous PutLogEvents call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

" } } }, @@ -1957,6 +2026,25 @@ } } }, + "PutQueryDefinitionRequest":{ + "type":"structure", + "required":[ + "name", + "queryString" + ], + "members":{ + "name":{"shape":"QueryDefinitionName"}, + "queryDefinitionId":{"shape":"QueryId"}, + "logGroupNames":{"shape":"LogGroupNames"}, + "queryString":{"shape":"QueryDefinitionString"} + } + }, + "PutQueryDefinitionResponse":{ + "type":"structure", + "members":{ + "queryDefinitionId":{"shape":"QueryId"} + } + }, "PutResourcePolicyRequest":{ "type":"structure", "members":{ @@ -2008,7 +2096,7 @@ }, "filterName":{ "shape":"FilterName", - "documentation":"

A name for the subscription filter. If you are updating an existing filter, you must specify the correct name in filterName. Otherwise, the call fails because you cannot associate a second filter with a log group. To find the name of the filter currently associated with a log group, use DescribeSubscriptionFilters.

" + "documentation":"

A name for the subscription filter. If you are updating an existing filter, you must specify the correct name in filterName. Otherwise, the call fails because you cannot associate a second filter with a log group. To find the name of the filter currently associated with a log group, use DescribeSubscriptionFilters.

" }, "filterPattern":{ "shape":"FilterPattern", @@ -2057,6 +2145,31 @@ }, "documentation":"

Reserved.

" }, + "QueryDefinition":{ + "type":"structure", + "members":{ + "queryDefinitionId":{"shape":"QueryId"}, + "name":{"shape":"QueryDefinitionName"}, + "queryString":{"shape":"QueryDefinitionString"}, + "lastModified":{"shape":"Timestamp"}, + "logGroupNames":{"shape":"LogGroupNames"} + } + }, + "QueryDefinitionList":{ + "type":"list", + "member":{"shape":"QueryDefinition"} + }, + "QueryDefinitionName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([^:*\\/]+\\/?)*[^:*\\/]+$" + }, + "QueryDefinitionString":{ + "type":"string", + "max":10000, + "min":1 + }, "QueryId":{ "type":"string", "max":256, @@ -2092,6 +2205,11 @@ "type":"list", "member":{"shape":"QueryInfo"} }, + "QueryListMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, "QueryResults":{ "type":"list", "member":{"shape":"ResultRows"} @@ -2126,7 +2244,7 @@ }, "QueryString":{ "type":"string", - "max":2048, + "max":10000, "min":0 }, "RejectedLogEventsInfo":{ @@ -2195,7 +2313,7 @@ "documentation":"

The value of this field.

" } }, - "documentation":"

Contains one field from one log event returned by a CloudWatch Logs Insights query, along with the value of that field.

" + "documentation":"

Contains one field from one log event returned by a CloudWatch Logs Insights query, along with the value of that field.

For more information about the fields that are generated by CloudWatch logs, see Supported Logs and Discovered Fields.

" }, "ResultRows":{ "type":"list", diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml new file mode 100644 index 000000000000..e907473799af --- /dev/null +++ b/services/codeartifact/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + codeartifact + AWS Java SDK :: Services :: Codeartifact + The AWS Java SDK for Codeartifact module holds the client classes that are used for + communicating with Codeartifact. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.codeartifact + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json b/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..ef8602842ebc --- /dev/null +++ b/services/codeartifact/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListDomains": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "domains" + }, + "ListPackageVersionAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assets" + }, + "ListPackageVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "versions" + }, + "ListPackages": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "packages" + }, + "ListRepositories": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + }, + "ListRepositoriesInDomain": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": 
"maxResults", + "result_key": "repositories" + } + } +} diff --git a/services/codeartifact/src/main/resources/codegen-resources/service-2.json b/services/codeartifact/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..1504f3b5e14e --- /dev/null +++ b/services/codeartifact/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,2962 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-09-22", + "endpointPrefix":"codeartifact", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"CodeArtifact", + "serviceId":"codeartifact", + "signatureVersion":"v4", + "signingName":"codeartifact", + "uid":"codeartifact-2018-09-22" + }, + "operations":{ + "AssociateExternalConnection":{ + "name":"AssociateExternalConnection", + "http":{ + "method":"POST", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"AssociateExternalConnectionRequest"}, + "output":{"shape":"AssociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Adds an existing external connection to a repository. One external connection is allowed per repository.

A repository can have one or more upstream repositories, or an external connection.

" + }, + "CopyPackageVersions":{ + "name":"CopyPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/copy" + }, + "input":{"shape":"CopyPackageVersionsRequest"}, + "output":{"shape":"CopyPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Copies package versions from one repository to another repository in the same domain.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a domain. CodeArtifact domains make it easier to manage multiple repositories across an organization. You can use a domain to apply permissions across many repositories owned by different AWS accounts. An asset is stored only once in a domain, even if it's in multiple repositories.

Although you can have multiple domains, we recommend a single production domain that contains all published artifacts so that your development teams can find and share packages. You can use a second pre-production domain to test changes to the production domain configuration.

" + }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/v1/repository" + }, + "input":{"shape":"CreateRepositoryRequest"}, + "output":{"shape":"CreateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a repository.

" + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DeleteDomainRequest"}, + "output":{"shape":"DeleteDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a domain. You cannot delete a domain that contains repositories. If you want to delete a domain with repositories, first delete its repositories.

" + }, + "DeleteDomainPermissionsPolicy":{ + "name":"DeleteDomainPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"DeleteDomainPermissionsPolicyRequest"}, + "output":{"shape":"DeleteDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the resource policy set on a domain.

" + }, + "DeletePackageVersions":{ + "name":"DeletePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/delete" + }, + "input":{"shape":"DeletePackageVersionsRequest"}, + "output":{"shape":"DeletePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes one or more versions of a package. A deleted package version cannot be restored in your repository. If you want to remove a package version from your repository and be able to restore it later, set its status to Archived. Archived packages cannot be downloaded from a repository and don't show up with list package APIs (for example, ListPackageVersions ), but you can restore them using UpdatePackageVersionsStatus .

" + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DeleteRepositoryRequest"}, + "output":{"shape":"DeleteRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a repository.

" + }, + "DeleteRepositoryPermissionsPolicy":{ + "name":"DeleteRepositoryPermissionsPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/permissions/policies" + }, + "input":{"shape":"DeleteRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"DeleteRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the resource policy that is set on a repository. After a resource policy is deleted, the permissions allowed and denied by the deleted policy are removed. The effect of deleting a resource policy might not be immediate.

Use DeleteRepositoryPermissionsPolicy with caution. After a policy is deleted, AWS users, roles, and accounts lose permissions to perform the repository actions granted by the deleted policy.

" + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"GET", + "requestUri":"/v1/domain" + }, + "input":{"shape":"DescribeDomainRequest"}, + "output":{"shape":"DescribeDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a DomainDescription object that contains information about the requested domain.

" + }, + "DescribePackageVersion":{ + "name":"DescribePackageVersion", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version" + }, + "input":{"shape":"DescribePackageVersionRequest"}, + "output":{"shape":"DescribePackageVersionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a PackageVersionDescription object that contains information about the requested package version.

" + }, + "DescribeRepository":{ + "name":"DescribeRepository", + "http":{ + "method":"GET", + "requestUri":"/v1/repository" + }, + "input":{"shape":"DescribeRepositoryRequest"}, + "output":{"shape":"DescribeRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a RepositoryDescription object that contains detailed information about the requested repository.

" + }, + "DisassociateExternalConnection":{ + "name":"DisassociateExternalConnection", + "http":{ + "method":"DELETE", + "requestUri":"/v1/repository/external-connection" + }, + "input":{"shape":"DisassociateExternalConnectionRequest"}, + "output":{"shape":"DisassociateExternalConnectionResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes an existing external connection from a repository.

" + }, + "DisposePackageVersions":{ + "name":"DisposePackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/dispose" + }, + "input":{"shape":"DisposePackageVersionsRequest"}, + "output":{"shape":"DisposePackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the assets in package versions and sets the package versions' status to Disposed. A disposed package version cannot be restored in your repository because its assets are deleted.

To view all disposed package versions in a repository, use ListPackageVersions and set the status parameter to Disposed.

To view information about a disposed package version, use DescribePackageVersion.

" + }, + "GetAuthorizationToken":{ + "name":"GetAuthorizationToken", + "http":{ + "method":"POST", + "requestUri":"/v1/authorization-token" + }, + "input":{"shape":"GetAuthorizationTokenRequest"}, + "output":{"shape":"GetAuthorizationTokenResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Generates a temporary authentication token for accessing repositories in the domain. This API requires the codeartifact:GetAuthorizationToken and sts:GetServiceBearerToken permissions.

CodeArtifact authorization tokens are valid for a period of 12 hours when created with the login command. You can call login periodically to refresh the token. When you create an authorization token with the GetAuthorizationToken API, you can set a custom authorization period, up to a maximum of 12 hours, with the durationSeconds parameter.

The authorization period begins after login or GetAuthorizationToken is called. If login or GetAuthorizationToken is called while assuming a role, the token lifetime is independent of the maximum session duration of the role. For example, if you call sts assume-role and specify a session duration of 15 minutes, then generate a CodeArtifact authorization token, the token will be valid for the full authorization period even though this is longer than the 15-minute session duration.

See Using IAM Roles for more information on controlling session duration.

" + }, + "GetDomainPermissionsPolicy":{ + "name":"GetDomainPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"GetDomainPermissionsPolicyRequest"}, + "output":{"shape":"GetDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the resource policy attached to the specified domain.

The policy is a resource-based policy, not an identity-based policy. For more information, see Identity-based policies and resource-based policies in the AWS Identity and Access Management User Guide.

" + }, + "GetPackageVersionAsset":{ + "name":"GetPackageVersionAsset", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/asset" + }, + "input":{"shape":"GetPackageVersionAssetRequest"}, + "output":{"shape":"GetPackageVersionAssetResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns an asset (or file) that is in a package. For example, for a Maven package version, use GetPackageVersionAsset to download a JAR file, a POM file, or any other assets in the package version.

" + }, + "GetPackageVersionReadme":{ + "name":"GetPackageVersionReadme", + "http":{ + "method":"GET", + "requestUri":"/v1/package/version/readme" + }, + "input":{"shape":"GetPackageVersionReadmeRequest"}, + "output":{"shape":"GetPackageVersionReadmeResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Gets the readme file or descriptive text for a package version. For packages that do not contain a readme file, CodeArtifact extracts a description from a metadata file. For example, from the <description> element in the pom.xml file of a Maven package.

The returned text might contain formatting. For example, it might contain formatting for Markdown or reStructuredText.

" + }, + "GetRepositoryEndpoint":{ + "name":"GetRepositoryEndpoint", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/endpoint" + }, + "input":{"shape":"GetRepositoryEndpointRequest"}, + "output":{"shape":"GetRepositoryEndpointResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

" + }, + "GetRepositoryPermissionsPolicy":{ + "name":"GetRepositoryPermissionsPolicy", + "http":{ + "method":"GET", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"GetRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"GetRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the resource policy that is set on a repository.

" + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/v1/domains" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call. Each returned DomainSummary object contains information about a domain.

" + }, + "ListPackageVersionAssets":{ + "name":"ListPackageVersionAssets", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/assets" + }, + "input":{"shape":"ListPackageVersionAssetsRequest"}, + "output":{"shape":"ListPackageVersionAssetsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of AssetSummary objects for assets in a package version.

" + }, + "ListPackageVersionDependencies":{ + "name":"ListPackageVersionDependencies", + "http":{ + "method":"POST", + "requestUri":"/v1/package/version/dependencies" + }, + "input":{"shape":"ListPackageVersionDependenciesRequest"}, + "output":{"shape":"ListPackageVersionDependenciesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the direct dependencies for a package version. The dependencies are returned as PackageDependency objects. CodeArtifact extracts the dependencies for a package version from the metadata file for the package format (for example, the package.json file for npm packages and the pom.xml file for Maven). Any package version dependencies that are not listed in the configuration file are not returned.

" + }, + "ListPackageVersions":{ + "name":"ListPackageVersions", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions" + }, + "input":{"shape":"ListPackageVersionsRequest"}, + "output":{"shape":"ListPackageVersionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of PackageVersionSummary objects for package versions in a repository that match the request parameters.

" + }, + "ListPackages":{ + "name":"ListPackages", + "http":{ + "method":"POST", + "requestUri":"/v1/packages" + }, + "input":{"shape":"ListPackagesRequest"}, + "output":{"shape":"ListPackagesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of PackageSummary objects for packages in a repository that match the request parameters.

" + }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/v1/repositories" + }, + "input":{"shape":"ListRepositoriesRequest"}, + "output":{"shape":"ListRepositoriesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified AWS account and that matches the input parameters.

" + }, + "ListRepositoriesInDomain":{ + "name":"ListRepositoriesInDomain", + "http":{ + "method":"POST", + "requestUri":"/v1/domain/repositories" + }, + "input":{"shape":"ListRepositoriesInDomainRequest"}, + "output":{"shape":"ListRepositoriesInDomainResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of RepositorySummary objects. Each RepositorySummary contains information about a repository in the specified domain and that matches the input parameters.

" + }, + "PutDomainPermissionsPolicy":{ + "name":"PutDomainPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/domain/permissions/policy" + }, + "input":{"shape":"PutDomainPermissionsPolicyRequest"}, + "output":{"shape":"PutDomainPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Sets a resource policy on a domain that specifies permissions to access it.

" + }, + "PutRepositoryPermissionsPolicy":{ + "name":"PutRepositoryPermissionsPolicy", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository/permissions/policy" + }, + "input":{"shape":"PutRepositoryPermissionsPolicyRequest"}, + "output":{"shape":"PutRepositoryPermissionsPolicyResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Sets the resource policy on a repository that specifies permissions to access it.

" + }, + "UpdatePackageVersionsStatus":{ + "name":"UpdatePackageVersionsStatus", + "http":{ + "method":"POST", + "requestUri":"/v1/package/versions/update_status" + }, + "input":{"shape":"UpdatePackageVersionsStatusRequest"}, + "output":{"shape":"UpdatePackageVersionsStatusResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates the status of one or more versions of a package.

" + }, + "UpdateRepository":{ + "name":"UpdateRepository", + "http":{ + "method":"PUT", + "requestUri":"/v1/repository" + }, + "input":{"shape":"UpdateRepositoryRequest"}, + "output":{"shape":"UpdateRepositoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Update the properties of a repository.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation did not succeed because of an unauthorized access attempt.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, + "Arn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"\\S+" + }, + "Asset":{ + "type":"blob", + "streaming":true + }, + "AssetHashes":{ + "type":"map", + "key":{"shape":"HashAlgorithm"}, + "value":{"shape":"HashValue"} + }, + "AssetName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"\\P{C}+" + }, + "AssetSummary":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"AssetName", + "documentation":"

The name of the asset.

" + }, + "size":{ + "shape":"LongOptional", + "documentation":"

The size of the asset.

" + }, + "hashes":{ + "shape":"AssetHashes", + "documentation":"

The hashes of the asset.

" + } + }, + "documentation":"

Contains details about a package version asset.

" + }, + "AssetSummaryList":{ + "type":"list", + "member":{"shape":"AssetSummary"} + }, + "AssociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to which the external connection is added.

", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection to add to the repository. The following values are supported:

  • public:npmjs - for the npm public repository.

  • public:pypi - for the Python Package Index.

  • public:maven-central - for Maven Central.

  • public:maven-googleandroid - for the Google Android repository.

  • public:maven-gradleplugins - for the Gradle plugins repository.

  • public:maven-commonsware - for the CommonsWare Android repository.

", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "AssociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the connected repository after processing the request.

" + } + } + }, + "AuthorizationTokenDurationSeconds":{ + "type":"long", + "max":43200, + "min":0 + }, + "BooleanOptional":{"type":"boolean"}, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because prerequisites are not met.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CopyPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "sourceRepository", + "destinationRepository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the source and destination repositories.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "sourceRepository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions to copy.

", + "location":"querystring", + "locationName":"source-repository" + }, + "destinationRepository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository into which package versions are copied.

", + "location":"querystring", + "locationName":"destination-repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that is copied. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that is copied.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

The versions of the package to copy.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersion operation succeeds if the specified versions in the source repository match the specified package version revision.

You must specify versions or versionRevisions. You cannot specify both.

" + }, + "allowOverwrite":{ + "shape":"BooleanOptional", + "documentation":"

Set to true to overwrite a package version that already exists in the destination repository. If set to false and the package version already exists in the destination repository, the package version is returned in the failedVersions field of the response with an ALREADY_EXISTS error code.

" + }, + "includeFromUpstream":{ + "shape":"BooleanOptional", + "documentation":"

Set to true to copy packages from repositories that are upstream from the source repository to the destination repository. The default setting is false. For more information, see Working with upstream repositories.

" + } + } + }, + "CopyPackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully copied to your repository.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A map of package versions that failed to copy and their error codes. The possible error codes are in the PackageVersionError data type. They are:

  • ALREADY_EXISTS

  • MISMATCHED_REVISION

  • MISMATCHED_STATUS

  • NOT_ALLOWED

  • NOT_FOUND

  • SKIPPED

" + } + } + }, + "CreateDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to create. All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable.

", + "location":"querystring", + "locationName":"domain" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The encryption key for the domain. This is used to encrypt content stored in a domain. An encryption key can be a key ID, a key Amazon Resource Name (ARN), a key alias, or a key alias ARN. To specify an encryptionKey, your IAM role must have kms:DescribeKey and kms:CreateGrant permissions on the encryption key that is used. For more information, see DescribeKey in the AWS Key Management Service API Reference and AWS KMS API Permissions Reference in the AWS Key Management Service Developer Guide.

CodeArtifact supports only symmetric CMKs. Do not associate an asymmetric CMK with your domain. For more information, see Using symmetric and asymmetric keys in the AWS Key Management Service Developer Guide.

" + } + } + }, + "CreateDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

Contains information about the created domain after processing the request.

" + } + } + }, + "CreateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the created repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to create.

", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

A description of the created repository.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + } + } + }, + "CreateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the created repository after processing the request.

" + } + } + }, + "DeleteDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain associated with the resource policy to be deleted.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy to be deleted. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

Information about the deleted resource policy after processing the request.

" + } + } + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DeleteDomainResult":{ + "type":"structure", + "members":{ + "domain":{ + "shape":"DomainDescription", + "documentation":"

Contains information about the deleted domain after processing the request.

" + } + } + }, + "DeletePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the package to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions to delete.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package versions to delete. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the versions to delete.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

An array of strings that specify the versions of the package to delete.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The expected status of the package version to delete. Valid values are:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

" + } + } + }, + "DeletePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully deleted.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A PackageVersionError object that contains a map of error codes for the deleted package versions that failed. The possible error codes are:

  • ALREADY_EXISTS

  • MISMATCHED_REVISION

  • MISMATCHED_STATUS

  • NOT_ALLOWED

  • NOT_FOUND

  • SKIPPED

" + } + } + }, + "DeleteRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository associated with the resource policy to be deleted.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that is associated with the resource policy to be deleted.

", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The revision of the repository's resource policy to be deleted. This revision is used for optimistic locking, which prevents others from accidentally overwriting your changes to the repository's resource policy.

", + "location":"querystring", + "locationName":"policy-revision" + } + } + }, + "DeleteRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

Information about the deleted policy after processing the request.

" + } + } + }, + "DeleteRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository to delete.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to delete.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DeleteRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

Information about the deleted repository after processing the request.

" + } + } + }, + "DescribeDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

A string that specifies the name of the requested domain.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "DescribeDomainResult":{ + "type":"structure", + "members":{ + "domain":{"shape":"DomainDescription"} + } + }, + "DescribePackageVersionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the package version.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package version.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the requested package version. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the requested package version.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + } + } + }, + "DescribePackageVersionResult":{ + "type":"structure", + "required":["packageVersion"], + "members":{ + "packageVersion":{ + "shape":"PackageVersionDescription", + "documentation":"

A PackageVersionDescription object that contains information about the requested package version.

" + } + } + }, + "DescribeRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository to describe.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

A string that specifies the name of the requested repository.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "DescribeRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

A RepositoryDescription object that contains the requested repository information.

" + } + } + }, + "Description":{ + "type":"string", + "max":1000, + "pattern":"\\P{C}+" + }, + "DisassociateExternalConnectionRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "externalConnection" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository from which to remove the external repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository from which the external connection will be removed.

", + "location":"querystring", + "locationName":"repository" + }, + "externalConnection":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection to be removed from the repository.

", + "location":"querystring", + "locationName":"external-connection" + } + } + }, + "DisassociateExternalConnectionResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

The repository associated with the removed external connection.

" + } + } + }, + "DisposePackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository you want to dispose.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package versions you want to dispose.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of package versions you want to dispose. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the versions you want to dispose.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

The versions of the package you want to dispose.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

The revisions of the package versions you want to dispose.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The expected status of the package version to dispose. Valid values are:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

" + } + } + }, + "DisposePackageVersionsResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of the package versions that were successfully disposed.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A PackageVersionError object that contains a map of error codes for the disposed package versions that failed. The possible error codes are:

  • ALREADY_EXISTS

  • MISMATCHED_REVISION

  • MISMATCHED_STATUS

  • NOT_ALLOWED

  • NOT_FOUND

  • SKIPPED

" + } + } + }, + "DomainDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "owner":{ + "shape":"AccountId", + "documentation":"

The AWS account ID that owns the domain.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the domain.

" + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

The current status of a domain. The valid values are:

  • Active

  • Deleted

" + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that represents the date and time the domain was created.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The ARN of an AWS Key Management Service (AWS KMS) key associated with a domain.

" + }, + "repositoryCount":{ + "shape":"Integer", + "documentation":"

The number of repositories in the domain.

" + }, + "assetSizeBytes":{ + "shape":"Long", + "documentation":"

The total size of all assets in the domain.

" + } + }, + "documentation":"

Information about a domain. A domain is a container for repositories. When you create a domain, it is empty until you add one or more repositories.

" + }, + "DomainName":{ + "type":"string", + "max":50, + "min":2, + "pattern":"[a-z][a-z0-9\\-]{0,48}[a-z0-9]" + }, + "DomainStatus":{ + "type":"string", + "enum":[ + "Active", + "Deleted" + ] + }, + "DomainSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "owner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the domain.

" + }, + "status":{ + "shape":"DomainStatus", + "documentation":"

A string that contains the status of the domain. The valid values are:

  • Active

  • Deleted

" + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that contains the date and time the domain was created.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The key used to encrypt the domain.

" + } + }, + "documentation":"

Information about a domain, including its name, Amazon Resource Name (ARN), and status. The ListDomains operation returns a list of DomainSummary objects.

" + }, + "DomainSummaryList":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "ErrorMessage":{"type":"string"}, + "ExternalConnectionName":{ + "type":"string", + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-:]{1,99}" + }, + "ExternalConnectionStatus":{ + "type":"string", + "enum":["Available"] + }, + "GetAuthorizationTokenRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that is in scope for the generated authorization token.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "durationSeconds":{ + "shape":"AuthorizationTokenDurationSeconds", + "documentation":"

The time, in seconds, that the generated authorization token is valid.

", + "location":"querystring", + "locationName":"duration" + } + } + }, + "GetAuthorizationTokenResult":{ + "type":"structure", + "members":{ + "authorizationToken":{ + "shape":"String", + "documentation":"

The returned authentication token.

" + }, + "expiration":{ + "shape":"Timestamp", + "documentation":"

A timestamp that specifies the date and time the authorization token expires.

" + } + } + }, + "GetDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain to which the resource policy is attached.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + } + } + }, + "GetDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The returned resource policy.

" + } + } + }, + "GetPackageVersionAssetRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion", + "asset" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the package version with the requested asset.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package version with the requested asset.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package version with the requested asset file. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the requested asset.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "asset":{ + "shape":"AssetName", + "documentation":"

The name of the requested asset.

", + "location":"querystring", + "locationName":"asset" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The name of the package version revision that contains the requested asset.

", + "location":"querystring", + "locationName":"revision" + } + } + }, + "GetPackageVersionAssetResult":{ + "type":"structure", + "members":{ + "asset":{ + "shape":"Asset", + "documentation":"

The binary file, or asset, that is downloaded.

" + }, + "assetName":{ + "shape":"AssetName", + "documentation":"

The name of the asset that is downloaded.

", + "location":"header", + "locationName":"X-AssetName" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"header", + "locationName":"X-PackageVersion" + }, + "packageVersionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The name of the package version revision that contains the downloaded asset.

", + "location":"header", + "locationName":"X-PackageVersionRevision" + } + }, + "payload":"asset" + }, + "GetPackageVersionReadmeRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the package version with the requested readme file.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package with the requested readme file.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package version with the requested readme file. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package version that contains the requested readme file.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + } + } + }, + "GetPackageVersionReadmeResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package with the requested readme file. Valid format types are:

  • npm

  • pypi

  • maven

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned readme file.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package with the requested readme file.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "readme":{ + "shape":"String", + "documentation":"

The text of the returned readme file.

" + } + } + }, + "GetRepositoryEndpointRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

Specifies which endpoint of the repository to return. A repository has one endpoint for each package format:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + } + } + }, + "GetRepositoryEndpointResult":{ + "type":"structure", + "members":{ + "repositoryEndpoint":{ + "shape":"String", + "documentation":"

A string that specifies the URL of the returned endpoint.

" + } + } + }, + "GetRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain containing the repository whose associated resource policy is to be retrieved.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository whose associated resource policy is to be retrieved.

", + "location":"querystring", + "locationName":"repository" + } + } + }, + "GetRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The returned resource policy.

" + } + } + }, + "HashAlgorithm":{ + "type":"string", + "enum":[ + "MD5", + "SHA-1", + "SHA-256", + "SHA-512" + ] + }, + "HashValue":{ + "type":"string", + "max":512, + "min":32, + "pattern":"[0-9a-f]+" + }, + "Integer":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation did not succeed because of an error that occurred inside AWS CodeArtifact.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "LicenseInfo":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

Name of the license.

" + }, + "url":{ + "shape":"String", + "documentation":"

The URL for license data.

" + } + }, + "documentation":"

Details of the license data.

" + }, + "LicenseInfoList":{ + "type":"list", + "member":{"shape":"LicenseInfo"} + }, + "ListDomainsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListDomainsMaxResults", + "documentation":"

The maximum number of results to return per page.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "ListDomainsResult":{ + "type":"structure", + "members":{ + "domains":{ + "shape":"DomainSummaryList", + "documentation":"

The returned list of DomainSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "ListPackageVersionAssetsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionAssetsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository associated with the package version assets.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package that contains the returned package version assets.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that contains the returned package version assets. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package version assets.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "maxResults":{ + "shape":"ListPackageVersionAssetsMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionAssetsResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package that contains the returned package version assets.

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package version assets.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package associated with the returned assets.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "assets":{ + "shape":"AssetSummaryList", + "documentation":"

The returned list of AssetSummary objects.

" + } + } + }, + "ListPackageVersionDependenciesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "packageVersion" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the requested package version dependencies.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the requested package version.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package with the requested dependencies. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package version's package.

", + "location":"querystring", + "locationName":"package" + }, + "packageVersion":{ + "shape":"PackageVersion", + "documentation":"

A string that contains the package version (for example, 3.5.2).

", + "location":"querystring", + "locationName":"version" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionDependenciesResult":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package that contains the returned dependencies. The valid values are:

  • npm

  • pypi

  • maven

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that contains the returned package version's dependencies.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package that is specified in the request.

" + }, + "versionRevision":{ + "shape":"PackageVersionRevision", + "documentation":"

The current revision associated with the package version.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "dependencies":{ + "shape":"PackageDependencyList", + "documentation":"

The returned list of PackageDependency objects.

" + } + } + }, + "ListPackageVersionsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackageVersionsRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository that contains the returned package versions.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository that contains the package.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the returned packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package for which you want to return a list of package versions.

", + "location":"querystring", + "locationName":"package" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that specifies the status of the package versions to include in the returned list. It can be one of the following:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

", + "location":"querystring", + "locationName":"status" + }, + "sortBy":{ + "shape":"PackageVersionSortType", + "documentation":"

How to sort the returned list of package versions.

", + "location":"querystring", + "locationName":"sortBy" + }, + "maxResults":{ + "shape":"ListPackageVersionsMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackageVersionsResult":{ + "type":"structure", + "members":{ + "defaultDisplayVersion":{ + "shape":"PackageVersion", + "documentation":"

The default package version to display. This depends on the package format:

  • For Maven and PyPI packages, it's the most recently published package version.

  • For npm packages, it's the version referenced by the latest tag. If the latest tag is not set, it's the most recently published package version.

" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format of the package. Valid package format values are:

  • npm

  • pypi

  • maven

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package.

" + }, + "versions":{ + "shape":"PackageVersionSummaryList", + "documentation":"

The returned list of PackageVersionSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListPackagesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListPackagesRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the requested list of packages.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository from which packages are to be listed.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the packages. The valid package types are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "packagePrefix":{ + "shape":"PackageName", + "documentation":"

A prefix used to filter returned packages. Only packages with names that start with packagePrefix are returned.

", + "location":"querystring", + "locationName":"package-prefix" + }, + "maxResults":{ + "shape":"ListPackagesMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListPackagesResult":{ + "type":"structure", + "members":{ + "packages":{ + "shape":"PackageSummaryList", + "documentation":"

The list of returned PackageSummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListRepositoriesInDomainMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesInDomainRequest":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the returned list of repositories.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

Filter the list of repositories to only include those that are managed by the AWS account ID.

", + "location":"querystring", + "locationName":"administrator-account" + }, + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesInDomainMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesInDomainResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

The returned list of repositories.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListRepositoriesMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListRepositoriesRequest":{ + "type":"structure", + "members":{ + "repositoryPrefix":{ + "shape":"RepositoryName", + "documentation":"

A prefix used to filter returned repositories. Only repositories with names that start with repositoryPrefix are returned.

", + "location":"querystring", + "locationName":"repository-prefix" + }, + "maxResults":{ + "shape":"ListRepositoriesMaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRepositoriesResult":{ + "type":"structure", + "members":{ + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

The returned list of RepositorySummary objects.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "Long":{"type":"long"}, + "LongOptional":{"type":"long"}, + "PackageDependency":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package that this package depends on.

" + }, + "dependencyType":{ + "shape":"String", + "documentation":"

The type of a package dependency. The possible values depend on the package type. Example types are compile, runtime, and test for Maven packages, and dev, prod, and optional for npm packages.

" + }, + "versionRequirement":{ + "shape":"String", + "documentation":"

The required version, or version range, of the package that this package depends on. The version format is specific to the package type. For example, the following are possible valid required versions: 1.2.3, ^2.3.4, or 4.x.

" + } + }, + "documentation":"

Details about a package dependency.

" + }, + "PackageDependencyList":{ + "type":"list", + "member":{"shape":"PackageDependency"} + }, + "PackageFormat":{ + "type":"string", + "enum":[ + "npm", + "pypi", + "maven" + ] + }, + "PackageName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageNamespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageSummary":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package. Valid values are:

  • npm

  • pypi

  • maven

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package.

" + } + }, + "documentation":"

Details about a package, including its format, namespace, and name. The ListPackages operation returns a list of PackageSummary objects.

" + }, + "PackageSummaryList":{ + "type":"list", + "member":{"shape":"PackageSummary"} + }, + "PackageVersion":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^!#/\\s]+" + }, + "PackageVersionDescription":{ + "type":"structure", + "members":{ + "format":{ + "shape":"PackageFormat", + "documentation":"

The format of the package version. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

" + }, + "packageName":{ + "shape":"PackageName", + "documentation":"

The name of the requested package.

" + }, + "displayName":{ + "shape":"String255", + "documentation":"

The name of the package that is displayed. The displayName varies depending on the package version's format. For example, if an npm package is named ui, is in the namespace vue, and has the format npm, then the displayName is @vue/ui.

" + }, + "version":{ + "shape":"PackageVersion", + "documentation":"

The version of the package.

" + }, + "summary":{ + "shape":"String", + "documentation":"

A summary of the package version. The summary is extracted from the package. The information in and detail level of the summary depends on the package version's format.

" + }, + "homePage":{ + "shape":"String", + "documentation":"

The homepage associated with the package.

" + }, + "sourceCodeRepository":{ + "shape":"String", + "documentation":"

The repository for the source code in the package version, or the source code used to build it.

" + }, + "publishedTime":{ + "shape":"Timestamp", + "documentation":"

A timestamp that contains the date and time the package version was published.

" + }, + "licenses":{ + "shape":"LicenseInfoList", + "documentation":"

Information about licenses associated with the package version.

" + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

The revision of the package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that contains the status of the package version. It can be one of the following:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

" + } + }, + "documentation":"

Details about a package version.

" + }, + "PackageVersionError":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"PackageVersionErrorCode", + "documentation":"

The error code associated with the error. Valid error codes are:

  • ALREADY_EXISTS

  • MISMATCHED_REVISION

  • MISMATCHED_STATUS

  • NOT_ALLOWED

  • NOT_FOUND

  • SKIPPED

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The error message associated with the error.

" + } + }, + "documentation":"

An error associated with a package.

" + }, + "PackageVersionErrorCode":{ + "type":"string", + "enum":[ + "ALREADY_EXISTS", + "MISMATCHED_REVISION", + "MISMATCHED_STATUS", + "NOT_ALLOWED", + "NOT_FOUND", + "SKIPPED" + ] + }, + "PackageVersionErrorMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionError"} + }, + "PackageVersionList":{ + "type":"list", + "member":{"shape":"PackageVersion"} + }, + "PackageVersionRevision":{ + "type":"string", + "max":50, + "min":1, + "pattern":"\\S+" + }, + "PackageVersionRevisionMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"PackageVersionRevision"} + }, + "PackageVersionSortType":{ + "type":"string", + "enum":["PUBLISHED_TIME"] + }, + "PackageVersionStatus":{ + "type":"string", + "enum":[ + "Published", + "Unfinished", + "Unlisted", + "Archived", + "Disposed", + "Deleted" + ] + }, + "PackageVersionSummary":{ + "type":"structure", + "required":[ + "version", + "status" + ], + "members":{ + "version":{ + "shape":"PackageVersion", + "documentation":"

Information about a package version.

" + }, + "revision":{ + "shape":"PackageVersionRevision", + "documentation":"

The revision associated with a package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

A string that contains the status of the package version. It can be one of the following:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

" + } + }, + "documentation":"

Details about a package version, including its status, version, and revision. The ListPackageVersions operation returns a list of PackageVersionSummary objects.

" + }, + "PackageVersionSummaryList":{ + "type":"list", + "member":{"shape":"PackageVersionSummary"} + }, + "PaginationToken":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"\\S+" + }, + "PolicyDocument":{ + "type":"string", + "max":5120, + "min":1 + }, + "PolicyRevision":{ + "type":"string", + "max":100, + "min":1, + "pattern":"\\S+" + }, + "PutDomainPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain on which to set the resource policy.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy to be set. This revision is used for optimistic locking, which prevents others from overwriting your changes to the domain's resource policy.

" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided domain.

" + } + } + }, + "PutDomainPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The resource policy that was set after processing the request.

" + } + } + }, + "PutRepositoryPermissionsPolicyRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "policyDocument" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain containing the repository to set the resource policy on.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to set the resource policy on.

", + "location":"querystring", + "locationName":"repository" + }, + "policyRevision":{ + "shape":"PolicyRevision", + "documentation":"

Sets the revision of the resource policy that specifies permissions to access the repository. This revision is used for optimistic locking, which prevents others from overwriting your changes to the repository's resource policy.

" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"

A valid displayable JSON Aspen policy string to be set as the access control resource policy on the provided repository.

" + } + } + }, + "PutRepositoryPermissionsPolicyResult":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicy", + "documentation":"

The resource policy that was set after processing the request.

" + } + } + }, + "RepositoryDescription":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that manages the repository.

" + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the repository.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A text description of the repository.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryInfoList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + }, + "externalConnections":{ + "shape":"RepositoryExternalConnectionInfoList", + "documentation":"

An array of external connections associated with the repository.

" + } + }, + "documentation":"

The details of a repository stored in AWS CodeArtifact. A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets. Repositories are polyglot—a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI (mvn), and pip. You can create up to 100 repositories per AWS account.

" + }, + "RepositoryExternalConnectionInfo":{ + "type":"structure", + "members":{ + "externalConnectionName":{ + "shape":"ExternalConnectionName", + "documentation":"

The name of the external connection associated with a repository.

" + }, + "packageFormat":{ + "shape":"PackageFormat", + "documentation":"

The package format associated with a repository's external connection. The valid package formats are:

  • npm: A Node Package Manager (npm) package.

  • pypi: A Python Package Index (PyPI) package.

  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.

" + }, + "status":{ + "shape":"ExternalConnectionStatus", + "documentation":"

The status of the external connection of a repository. There is one valid value, Available.

" + } + }, + "documentation":"

Contains information about the external connection of a repository.

" + }, + "RepositoryExternalConnectionInfoList":{ + "type":"list", + "member":{"shape":"RepositoryExternalConnectionInfo"} + }, + "RepositoryName":{ + "type":"string", + "max":100, + "min":2, + "pattern":"[A-Za-z0-9][A-Za-z0-9._\\-]{1,99}" + }, + "RepositorySummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "administratorAccount":{ + "shape":"AccountId", + "documentation":"

The AWS account ID that manages the repository.

" + }, + "domainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that contains the repository.

" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the repository.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the repository.

" + } + }, + "documentation":"

Details about a repository, including its Amazon Resource Name (ARN), description, and domain information. The ListRepositories operation returns a list of RepositorySummary objects.

" + }, + "RepositorySummaryList":{ + "type":"list", + "member":{"shape":"RepositorySummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because the resource requested is not found in the service.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePolicy":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the resource associated with the resource policy.

" + }, + "revision":{ + "shape":"PolicyRevision", + "documentation":"

The current revision of the resource policy.

" + }, + "document":{ + "shape":"PolicyDocument", + "documentation":"

The resource policy formatted in JSON.

" + } + }, + "documentation":"

An AWS CodeArtifact resource policy that contains a resource ARN, document details, and a revision.

" + }, + "ResourceType":{ + "type":"string", + "enum":[ + "domain", + "repository", + "package", + "package-version", + "asset" + ] + }, + "RetryAfterSeconds":{"type":"integer"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of AWS resource.

" + } + }, + "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "String":{"type":"string"}, + "String255":{ + "type":"string", + "max":255, + "min":1 + }, + "SuccessfulPackageVersionInfo":{ + "type":"structure", + "members":{ + "revision":{ + "shape":"String", + "documentation":"

The revision of a package version.

" + }, + "status":{ + "shape":"PackageVersionStatus", + "documentation":"

The status of a package version. Valid statuses are:

  • Published

  • Unfinished

  • Unlisted

  • Archived

  • Disposed

" + } + }, + "documentation":"

Contains the revision and status of a package version.

" + }, + "SuccessfulPackageVersionInfoMap":{ + "type":"map", + "key":{"shape":"PackageVersion"}, + "value":{"shape":"SuccessfulPackageVersionInfo"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The time period, in seconds, to wait before retrying the request.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The operation did not succeed because too many requests are sent to the service.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UpdatePackageVersionsStatusRequest":{ + "type":"structure", + "required":[ + "domain", + "repository", + "format", + "package", + "versions", + "targetStatus" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The domain that contains the repository that contains the package versions with a status to be updated.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The repository that contains the package versions with the status you want to update.

", + "location":"querystring", + "locationName":"repository" + }, + "format":{ + "shape":"PackageFormat", + "documentation":"

A format that specifies the type of the package with the statuses to update. The valid values are:

  • npm

  • pypi

  • maven

", + "location":"querystring", + "locationName":"format" + }, + "namespace":{ + "shape":"PackageNamespace", + "documentation":"

The namespace of the package. The package component that specifies its namespace depends on its type. For example:

  • The namespace of a Maven package is its groupId.

  • The namespace of an npm package is its scope.

  • A Python package does not contain a corresponding component, so Python packages do not have a namespace.

", + "location":"querystring", + "locationName":"namespace" + }, + "package":{ + "shape":"PackageName", + "documentation":"

The name of the package with the version statuses to update.

", + "location":"querystring", + "locationName":"package" + }, + "versions":{ + "shape":"PackageVersionList", + "documentation":"

An array of strings that specify the versions of the package with the statuses to update.

" + }, + "versionRevisions":{ + "shape":"PackageVersionRevisionMap", + "documentation":"

A map of package versions and package version revisions. The map key is the package version (for example, 3.5.2), and the map value is the package version revision.

" + }, + "expectedStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The package version’s expected status before it is updated. If expectedStatus is provided, the package version's status is updated only if its status at the time UpdatePackageVersionsStatus is called matches expectedStatus.

" + }, + "targetStatus":{ + "shape":"PackageVersionStatus", + "documentation":"

The status you want to change the package version status to.

" + } + } + }, + "UpdatePackageVersionsStatusResult":{ + "type":"structure", + "members":{ + "successfulVersions":{ + "shape":"SuccessfulPackageVersionInfoMap", + "documentation":"

A list of SuccessfulPackageVersionInfo objects, one for each package version with a status that successfully updated.

" + }, + "failedVersions":{ + "shape":"PackageVersionErrorMap", + "documentation":"

A list of PackageVersionError objects, one for each package version with a status that failed to update.

" + } + } + }, + "UpdateRepositoryRequest":{ + "type":"structure", + "required":[ + "domain", + "repository" + ], + "members":{ + "domain":{ + "shape":"DomainName", + "documentation":"

The name of the domain associated with the repository to update.

", + "location":"querystring", + "locationName":"domain" + }, + "domainOwner":{ + "shape":"AccountId", + "documentation":"

The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.

", + "location":"querystring", + "locationName":"domain-owner" + }, + "repository":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository to update.

", + "location":"querystring", + "locationName":"repository" + }, + "description":{ + "shape":"Description", + "documentation":"

An updated repository description.

" + }, + "upstreams":{ + "shape":"UpstreamRepositoryList", + "documentation":"

A list of upstream repositories to associate with the repository. The order of the upstream repositories in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more information, see Working with upstream repositories.

" + } + } + }, + "UpdateRepositoryResult":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"RepositoryDescription", + "documentation":"

The updated repository.

" + } + } + }, + "UpstreamRepository":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of an upstream repository.

" + } + }, + "documentation":"

Information about an upstream repository. A list of UpstreamRepository objects is an input parameter to CreateRepository and UpdateRepository.

" + }, + "UpstreamRepositoryInfo":{ + "type":"structure", + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of an upstream repository.

" + } + }, + "documentation":"

Information about an upstream repository.

" + }, + "UpstreamRepositoryInfoList":{ + "type":"list", + "member":{"shape":"UpstreamRepositoryInfo"} + }, + "UpstreamRepositoryList":{ + "type":"list", + "member":{"shape":"UpstreamRepository"} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

" + } + }, + "documentation":"

The operation did not succeed because a parameter in the request was sent with an invalid value.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "CANNOT_PARSE", + "ENCRYPTION_KEY_ERROR", + "FIELD_VALIDATION_FAILED", + "UNKNOWN_OPERATION", + "OTHER" + ] + } + }, + "documentation":"

AWS CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client.

AWS CodeArtifact Components

Use the information in this guide to help you work with the following CodeArtifact components:

  • Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools like the npm CLI, the Maven CLI (mvn), and pip. You can create up to 100 repositories per AWS account.

  • Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in AWS Key Management Service (AWS KMS).

    Each repository is a member of a single domain and can't be moved to a different domain.

    The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages.

    Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization.

  • Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, and Maven package formats.

    In CodeArtifact, a package consists of:

    • A name (for example, webpack is the name of a popular npm package)

    • An optional namespace (for example, @types in @types/node)

    • A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.)

    • Package-level metadata (for example, npm tags)

  • Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets.

  • Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories.

  • Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files.

CodeArtifact supports these operations:

  • AssociateExternalConnection: Adds an existing external connection to a repository.

  • CopyPackageVersions: Copies package versions from one repository to another repository in the same domain.

  • CreateDomain: Creates a domain.

  • CreateRepository: Creates a CodeArtifact repository in a domain.

  • DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories.

  • DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

  • DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage.

  • DeleteRepository: Deletes a repository.

  • DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

  • DescribeDomain: Returns a DomainDescription object that contains information about the requested domain.

  • DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version.

  • DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository.

  • DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage.

  • DisassociateExternalConnection: Removes an existing external connection from a repository.

  • GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.

  • GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain.

  • GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

  • GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

  • GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:

    • npm

    • pypi

    • maven

  • GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository.

  • ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain.

  • ListPackages: Lists the packages in a repository.

  • ListPackageVersionAssets: Lists the assets for a given package version.

  • ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version.

  • ListPackageVersions: Returns a list of package versions for a specified package in a repository.

  • ListRepositories: Returns a list of repositories owned by the AWS account that called this method.

  • ListRepositoriesInDomain: Returns a list of the repositories in a domain.

  • PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

  • PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it.

  • UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

  • UpdateRepository: Updates the properties of a repository.

" +} diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 08078ab1de2d..285056b8c08b 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codebuild/src/main/resources/codegen-resources/paginators-1.json b/services/codebuild/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..4ffefbe606a7 100644 --- a/services/codebuild/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codebuild/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,55 @@ { "pagination": { + "DescribeTestCases": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "testCases" + }, + "ListBuilds": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "ids" + }, + "ListBuildsForProject": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "ids" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListReportGroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "reportGroups" + }, + "ListReports": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "reports" + }, + "ListReportsForReportGroup": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "reports" + }, + "ListSharedProjects": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListSharedReportGroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "reportGroups" + } } -} +} \ No newline at end of file diff --git 
a/services/codebuild/src/main/resources/codegen-resources/service-2.json b/services/codebuild/src/main/resources/codegen-resources/service-2.json index 4e46f04643b4..b718e1205313 100644 --- a/services/codebuild/src/main/resources/codegen-resources/service-2.json +++ b/services/codebuild/src/main/resources/codegen-resources/service-2.json @@ -864,6 +864,20 @@ "type":"list", "member":{"shape":"String"} }, + "BuildStatusConfig":{ + "type":"structure", + "members":{ + "context":{ + "shape":"String", + "documentation":"

Specifies the context of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.

Bitbucket

This parameter is used for the name parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.

GitHub/GitHub Enterprise Server

This parameter is used for the context parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.

" + }, + "targetUrl":{ + "shape":"String", + "documentation":"

Specifies the target url of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.

Bitbucket

This parameter is used for the url parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.

GitHub/GitHub Enterprise Server

This parameter is used for the target_url parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.

" + } + }, + "documentation":"

Contains information that defines how the AWS CodeBuild build project reports the build status to the source provider.

" + }, "Builds":{ "type":"list", "member":{"shape":"Build"} @@ -984,7 +998,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

A set of tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" + "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "vpcConfig":{ "shape":"VpcConfig", @@ -1032,6 +1046,10 @@ "exportConfig":{ "shape":"ReportExportConfig", "documentation":"

A ReportExportConfig object that contains information about where the report group test results are exported.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } } }, @@ -1284,11 +1302,11 @@ }, "value":{ "shape":"String", - "documentation":"

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

" + "documentation":"

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

" }, "type":{ "shape":"EnvironmentVariableType", - "documentation":"

The type of environment variable. Valid values include:

  • PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems Manager Parameter Store.

  • PLAINTEXT: An environment variable in plain text format. This is the default value.

  • SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager.

" + "documentation":"

The type of environment variable. Valid values include:

" } }, "documentation":"

Information about an environment variable for a build project or a build.

" @@ -1933,7 +1951,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" + "documentation":"

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "created":{ "shape":"Timestamp", @@ -2073,7 +2091,7 @@ "members":{ "type":{ "shape":"EnvironmentType", - "documentation":"

The type of build environment to use for related builds.

  • The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).

  • The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

  • The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).

" + "documentation":"

The type of build environment to use for related builds.

  • The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt).

  • The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

  • The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

" }, "image":{ "shape":"NonEmptyString", @@ -2168,7 +2186,7 @@ "members":{ "type":{ "shape":"SourceType", - "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB: The source code is in a GitHub repository.

  • GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise repository.

  • NO_SOURCE: The project does not have input source code.

  • S3: The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.

" + "documentation":"

The type of repository that contains the source code to be built. Valid values include:

  • BITBUCKET: The source code is in a Bitbucket repository.

  • CODECOMMIT: The source code is in an AWS CodeCommit repository.

  • CODEPIPELINE: The source code settings are specified in the source action of a pipeline in AWS CodePipeline.

  • GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository.

  • GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository.

  • NO_SOURCE: The project does not have input source code.

  • S3: The source code is in an Amazon Simple Storage Service (Amazon S3) input bucket.

" }, "location":{ "shape":"String", @@ -2194,6 +2212,10 @@ "shape":"WrapperBoolean", "documentation":"

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

The status of a build triggered by a webhook is always reported to your source provider.

" }, + "buildStatusConfig":{ + "shape":"BuildStatusConfig", + "documentation":"

Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.

" + }, "insecureSsl":{ "shape":"WrapperBoolean", "documentation":"

Enable this flag to ignore SSL warnings while connecting to the project source code.

" @@ -2390,6 +2412,10 @@ "lastModified":{ "shape":"Timestamp", "documentation":"

The date and time this ReportGroup was last modified.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } }, "documentation":"

A series of reports. Each report contains information about the results from running a series of test cases. You specify the test cases for a report group in the buildspec for a build project using one or more paths to the test case files.

" @@ -2659,6 +2685,10 @@ "shape":"WrapperBoolean", "documentation":"

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

The status of a build triggered by a webhook is always reported to your source provider.

" }, + "buildStatusConfigOverride":{ + "shape":"BuildStatusConfig", + "documentation":"

Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.

" + }, "environmentTypeOverride":{ "shape":"EnvironmentType", "documentation":"

A container type for this build that overrides the one specified in the build project.

" @@ -2701,7 +2731,7 @@ }, "idempotencyToken":{ "shape":"String", - "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 12 hours. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" + "documentation":"

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

" }, "logsConfigOverride":{ "shape":"LogsConfig", @@ -2925,7 +2955,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

The replacement set of tags for this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" + "documentation":"

An updated list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

" }, "vpcConfig":{ "shape":"VpcConfig", @@ -2965,6 +2995,10 @@ "exportConfig":{ "shape":"ReportExportConfig", "documentation":"

Used to specify an updated export type. Valid values are:

  • S3: The report results are exported to an S3 bucket.

  • NO_EXPORT: The report results are not exported.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

An updated list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } } }, @@ -3011,7 +3045,7 @@ "ValueInput":{ "type":"string", "max":255, - "min":1, + "min":0, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=@+\\-]*)$" }, "VpcConfig":{ @@ -3071,7 +3105,7 @@ "members":{ "type":{ "shape":"WebhookFilterType", - "documentation":"

The type of webhook filter. There are five webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, and FILE_PATH.

EVENT

A webhook event triggers a build when the provided pattern matches one of four event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, and PULL_REQUEST_REOPENED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events.

The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only.

ACTOR_ACCOUNT_ID

A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.

HEAD_REF

A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.

Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

BASE_REF

A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name.

Works with pull request events only.

FILE_PATH

A webhook triggers a build when the path of a changed file matches the regular expression pattern.

Works with GitHub and GitHub Enterprise push events only.

" + "documentation":"

The type of webhook filter. There are six webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, and COMMIT_MESSAGE.

EVENT

A webhook event triggers a build when the provided pattern matches one of five event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_REOPENED, and PULL_REQUEST_MERGED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events.

The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only.

ACTOR_ACCOUNT_ID

A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.

HEAD_REF

A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name.

Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

BASE_REF

A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name.

Works with pull request events only.

FILE_PATH

A webhook triggers a build when the path of a changed file matches the regular expression pattern.

Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

COMMIT_MESSAGE

A webhook triggers a build when the head commit message matches the regular expression pattern.

Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.

" }, "pattern":{ "shape":"String", @@ -3091,7 +3125,8 @@ "BASE_REF", "HEAD_REF", "ACTOR_ACCOUNT_ID", - "FILE_PATH" + "FILE_PATH", + "COMMIT_MESSAGE" ] }, "WrapperBoolean":{"type":"boolean"}, diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index 69cdddd89c09..0b975b22d569 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json index 5fcda36b895d..ab4bae4cd486 100644 --- a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json @@ -10,6 +10,11 @@ "limit_key": "maxResults", "output_token": "nextToken" }, + "GetCommentReactions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, "GetCommentsForComparedCommit": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/services/codecommit/src/main/resources/codegen-resources/service-2.json b/services/codecommit/src/main/resources/codegen-resources/service-2.json index 255175f3143d..95dd51101594 100644 --- a/services/codecommit/src/main/resources/codegen-resources/service-2.json +++ b/services/codecommit/src/main/resources/codegen-resources/service-2.json @@ -695,11 +695,35 @@ "output":{"shape":"GetCommentOutput"}, "errors":[ {"shape":"CommentDoesNotExistException"}, + {"shape":"CommentDeletedException"}, {"shape":"CommentIdRequiredException"}, {"shape":"InvalidCommentIdException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + 
], + "documentation":"

Returns the content of a comment made on a change, file, or commit in a repository.

Reaction counts might include numbers from user identities who were deleted after the reaction was made. For a count of reactions from active identities, use GetCommentReactions.

" + }, + "GetCommentReactions":{ + "name":"GetCommentReactions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCommentReactionsInput"}, + "output":{"shape":"GetCommentReactionsOutput"}, + "errors":[ + {"shape":"CommentDoesNotExistException"}, + {"shape":"CommentIdRequiredException"}, + {"shape":"InvalidCommentIdException"}, + {"shape":"InvalidReactionUserArnException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"}, {"shape":"CommentDeletedException"} ], - "documentation":"

Returns the content of a comment made on a change, file, or commit in a repository.

" + "documentation":"

Returns information about reactions to a specified comment ID. Reactions from users who have been deleted will not be included in the count.

" }, "GetCommentsForComparedCommit":{ "name":"GetCommentsForComparedCommit", @@ -724,7 +748,7 @@ {"shape":"EncryptionKeyNotFoundException"}, {"shape":"EncryptionKeyUnavailableException"} ], - "documentation":"

Returns information about comments made on the comparison between two commits.

" + "documentation":"

Returns information about comments made on the comparison between two commits.

Reaction counts might include numbers from user identities who were deleted after the reaction was made. For a count of reactions from active identities, use GetCommentReactions.

" }, "GetCommentsForPullRequest":{ "name":"GetCommentsForPullRequest", @@ -753,7 +777,7 @@ {"shape":"EncryptionKeyNotFoundException"}, {"shape":"EncryptionKeyUnavailableException"} ], - "documentation":"

Returns comments made on a pull request.

" + "documentation":"

Returns comments made on a pull request.

Reaction counts might include numbers from user identities who were deleted after the reaction was made. For a count of reactions from active identities, use GetCommentReactions.

" }, "GetCommit":{ "name":"GetCommit", @@ -1496,15 +1520,16 @@ {"shape":"InvalidFilePositionException"}, {"shape":"CommitIdRequiredException"}, {"shape":"InvalidCommitIdException"}, + {"shape":"BeforeCommitIdAndAfterCommitIdAreSameException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, {"shape":"EncryptionKeyNotFoundException"}, {"shape":"EncryptionKeyUnavailableException"}, - {"shape":"BeforeCommitIdAndAfterCommitIdAreSameException"}, {"shape":"CommitDoesNotExistException"}, {"shape":"InvalidPathException"}, - {"shape":"PathDoesNotExistException"} + {"shape":"PathDoesNotExistException"}, + {"shape":"PathRequiredException"} ], "documentation":"

Posts a comment on the comparison between two commits.

", "idempotent":true @@ -1536,6 +1561,7 @@ {"shape":"InvalidFilePositionException"}, {"shape":"CommitIdRequiredException"}, {"shape":"InvalidCommitIdException"}, + {"shape":"BeforeCommitIdAndAfterCommitIdAreSameException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1544,8 +1570,7 @@ {"shape":"CommitDoesNotExistException"}, {"shape":"InvalidPathException"}, {"shape":"PathDoesNotExistException"}, - {"shape":"PathRequiredException"}, - {"shape":"BeforeCommitIdAndAfterCommitIdAreSameException"} + {"shape":"PathRequiredException"} ], "documentation":"

Posts a comment on a pull request.

", "idempotent":true @@ -1571,6 +1596,24 @@ "documentation":"

Posts a comment in reply to an existing comment on a comparison between commits or a pull request.

", "idempotent":true }, + "PutCommentReaction":{ + "name":"PutCommentReaction", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutCommentReactionInput"}, + "errors":[ + {"shape":"CommentDoesNotExistException"}, + {"shape":"CommentIdRequiredException"}, + {"shape":"InvalidCommentIdException"}, + {"shape":"InvalidReactionValueException"}, + {"shape":"ReactionValueRequiredException"}, + {"shape":"ReactionLimitExceededException"}, + {"shape":"CommentDeletedException"} + ], + "documentation":"

Adds or updates a reaction to a specified comment for the user whose identity is used to make the request. You can only add or update a reaction for yourself. You cannot add, modify, or delete a reaction for another user.

" + }, "PutFile":{ "name":"PutFile", "http":{ @@ -2625,7 +2668,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified branch name already exists.

", + "documentation":"

Cannot create the branch with the specified name because the commit conflicts with an existing branch with the same name. Branch names must be unique.

", "exception":true }, "BranchNameIsTagNameException":{ @@ -2646,6 +2689,10 @@ "documentation":"

A branch name is required, but was not specified.

", "exception":true }, + "CallerReactions":{ + "type":"list", + "member":{"shape":"ReactionValue"} + }, "CannotDeleteApprovalRuleFromTemplateException":{ "type":"structure", "members":{ @@ -2713,6 +2760,14 @@ "clientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

" + }, + "callerReactions":{ + "shape":"CallerReactions", + "documentation":"

The emoji reactions to a comment, if any, submitted by the user whose credentials are associated with the call to the API.

" + }, + "reactionCounts":{ + "shape":"ReactionCountsMap", + "documentation":"

A string to integer map that represents the number of individual users who have responded to a comment with the specified reactions.

" } }, "documentation":"

Returns information about a specific comment.

" @@ -3047,6 +3102,7 @@ "member":{"shape":"Conflict"} }, "Content":{"type":"string"}, + "Count":{"type":"integer"}, "CreateApprovalRuleTemplateInput":{ "type":"structure", "required":[ @@ -3060,7 +3116,7 @@ }, "approvalRuleTemplateContent":{ "shape":"ApprovalRuleTemplateContent", - "documentation":"

The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" }, "approvalRuleTemplateDescription":{ "shape":"ApprovalRuleTemplateDescription", @@ -3193,7 +3249,7 @@ }, "approvalRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.

When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.

When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" } } }, @@ -4104,6 +4160,42 @@ } } }, + "GetCommentReactionsInput":{ + "type":"structure", + "required":["commentId"], + "members":{ + "commentId":{ + "shape":"CommentId", + "documentation":"

The ID of the comment for which you want to get reactions information.

" + }, + "reactionUserArn":{ + "shape":"Arn", + "documentation":"

Optional. The Amazon Resource Name (ARN) of the user or identity for which you want to get reaction information.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

An enumeration token that, when provided in a request, returns the next batch of the results.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

A positive integer used to limit the number of returned results. The default is the same as the allowed maximum, 1,000.

" + } + } + }, + "GetCommentReactionsOutput":{ + "type":"structure", + "required":["reactionsForComment"], + "members":{ + "reactionsForComment":{ + "shape":"ReactionsForCommentList", + "documentation":"

An array of reactions to the specified comment.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

An enumeration token that can be used in a request to return the next batch of the results.

" + } + } + }, "GetCommentsForComparedCommitInput":{ "type":"structure", "required":[ @@ -4959,6 +5051,20 @@ "documentation":"

The pull request status update is not valid. The only valid update is from OPEN to CLOSED.

", "exception":true }, + "InvalidReactionUserArnException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The Amazon Resource Name (ARN) of the user or identity is not valid.

", + "exception":true + }, + "InvalidReactionValueException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The value of the reaction is not valid. For more information, see the AWS CodeCommit User Guide.

", + "exception":true + }, "InvalidReferenceNameException":{ "type":"structure", "members":{ @@ -6525,6 +6631,23 @@ "type":"list", "member":{"shape":"PullRequestTarget"} }, + "PutCommentReactionInput":{ + "type":"structure", + "required":[ + "commentId", + "reactionValue" + ], + "members":{ + "commentId":{ + "shape":"CommentId", + "documentation":"

The ID of the comment to which you want to add or update a reaction.

" + }, + "reactionValue":{ + "shape":"ReactionValue", + "documentation":"

The emoji reaction you want to add or update. To remove a reaction, provide an empty or null value, or the value none. For information about emoji reaction values supported in AWS CodeCommit, see the AWS CodeCommit User Guide.

" + } + } + }, "PutFileEntries":{ "type":"list", "member":{"shape":"PutFileEntry"} @@ -6656,6 +6779,73 @@ }, "documentation":"

Represents the output of a put repository triggers operation.

" }, + "ReactionCountsMap":{ + "type":"map", + "key":{"shape":"ReactionValue"}, + "value":{"shape":"Count"} + }, + "ReactionEmoji":{"type":"string"}, + "ReactionForComment":{ + "type":"structure", + "members":{ + "reaction":{ + "shape":"ReactionValueFormats", + "documentation":"

The reaction for a specified comment.

" + }, + "reactionUsers":{ + "shape":"ReactionUsersList", + "documentation":"

The Amazon Resource Names (ARNs) of users who have provided reactions to the comment.

" + }, + "reactionsFromDeletedUsersCount":{ + "shape":"Count", + "documentation":"

A numerical count of users who reacted with the specified emoji but whose identities have subsequently been deleted from IAM. Although these IAM users or roles no longer exist, the reactions might still appear in total reaction counts.

" + } + }, + "documentation":"

Information about the reaction values provided by users on a comment.

" + }, + "ReactionLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The number of reactions has been exceeded. Reactions are limited to one reaction per user for each individual comment ID.

", + "exception":true + }, + "ReactionShortCode":{"type":"string"}, + "ReactionUnicode":{"type":"string"}, + "ReactionUsersList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "ReactionValue":{"type":"string"}, + "ReactionValueFormats":{ + "type":"structure", + "members":{ + "emoji":{ + "shape":"ReactionEmoji", + "documentation":"

The Emoji Version 1.0 graphic of the reaction. These graphics are interpreted slightly differently on different operating systems.

" + }, + "shortCode":{ + "shape":"ReactionShortCode", + "documentation":"

The emoji short code for the reaction. Short codes are interpreted slightly differently on different operating systems.

" + }, + "unicode":{ + "shape":"ReactionUnicode", + "documentation":"

The Unicode codepoint for the reaction.

" + } + }, + "documentation":"

Information about the values for reactions to a comment. AWS CodeCommit supports a limited set of reactions.

" + }, + "ReactionValueRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"

A reaction value is required.

", + "exception":true + }, + "ReactionsForCommentList":{ + "type":"list", + "member":{"shape":"ReactionForComment"} + }, "ReferenceDoesNotExistException":{ "type":"structure", "members":{ @@ -7453,7 +7643,7 @@ }, "newRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

The updated content for the approval rule.

When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" + "documentation":"

The updated content for the approval rule.

When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

  • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

    • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

    • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

    This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

  • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" } } }, @@ -7623,5 +7813,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • DeleteFile, which deletes the content of a specified file from a specified branch.

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

  • GetFile, which returns the base-64 encoded content of a specified file.

  • GetFolder, which returns the contents of a specified folder or directory.

  • PutFile, which adds or modifies a single file in a specified repository and branch.

Commits, by calling the following:

  • BatchGetCommits, which returns information about one or more commits in a repository.

  • CreateCommit, which creates a commit for changes to a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

Merges, by calling the following:

  • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

  • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

  • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

  • GetMergeCommit, which returns information about the merge between a source and destination commit.

  • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

  • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

  • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

  • MergeBranchesBySquash, which merges two branches using the squash merge option.

  • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

Pull requests, by calling the following:

Approval rule templates, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

  • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

  • UntagResource, which removes tags for a resource in AWS CodeCommit.

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" + "documentation":"AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

  • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

  • CreateRepository, which creates an AWS CodeCommit repository.

  • DeleteRepository, which deletes an AWS CodeCommit repository.

  • GetRepository, which returns information about a specified repository.

  • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

  • UpdateRepositoryDescription, which sets or updates the description of the repository.

  • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

Branches, by calling the following:

  • CreateBranch, which creates a branch in a specified repository.

  • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

  • GetBranch, which returns information about a specified branch.

  • ListBranches, which lists all branches for a specified repository.

  • UpdateDefaultBranch, which changes the default branch for a repository.

Files, by calling the following:

  • DeleteFile, which deletes the content of a specified file from a specified branch.

  • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

  • GetFile, which returns the base-64 encoded content of a specified file.

  • GetFolder, which returns the contents of a specified folder or directory.

  • PutFile, which adds or modifies a single file in a specified repository and branch.

Commits, by calling the following:

  • BatchGetCommits, which returns information about one or more commits in a repository.

  • CreateCommit, which creates a commit for changes to a repository.

  • GetCommit, which returns information about a commit, including commit messages and author and committer information.

  • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

Merges, by calling the following:

  • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

  • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

  • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

  • GetMergeCommit, which returns information about the merge between a source and destination commit.

  • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

  • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

  • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

  • MergeBranchesBySquash, which merges two branches using the squash merge option.

  • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

Pull requests, by calling the following:

Approval rule templates, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

  • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

  • UntagResource, which removes tags for a resource in AWS CodeCommit.

Triggers, by calling the following:

  • GetRepositoryTriggers, which returns information about triggers configured for a repository.

  • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

  • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

" } diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 68d8b5d66096..541c8f8b1106 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codedeploy/src/main/resources/codegen-resources/service-2.json b/services/codedeploy/src/main/resources/codegen-resources/service-2.json index 8f7fcc25db9e..be66e86114fd 100644 --- a/services/codedeploy/src/main/resources/codegen-resources/service-2.json +++ b/services/codedeploy/src/main/resources/codegen-resources/service-2.json @@ -63,7 +63,7 @@ {"shape":"ApplicationDoesNotExistException"}, {"shape":"BatchLimitExceededException"} ], - "documentation":"

Gets information about one or more applications. The maximum number of applications that can be returned is 25.

" + "documentation":"

Gets information about one or more applications. The maximum number of applications that can be returned is 100.

" }, "BatchGetDeploymentGroups":{ "name":"BatchGetDeploymentGroups", @@ -121,9 +121,10 @@ {"shape":"DeploymentTargetIdRequiredException"}, {"shape":"InvalidDeploymentTargetIdException"}, {"shape":"DeploymentTargetDoesNotExistException"}, - {"shape":"DeploymentTargetListSizeExceededException"} + {"shape":"DeploymentTargetListSizeExceededException"}, + {"shape":"InstanceDoesNotExistException"} ], - "documentation":"

Returns an array of one or more targets associated with a deployment. This method works with all compute types and should be used instead of the deprecated BatchGetDeploymentInstances. The maximum number of targets that can be returned is 25.

The type of targets returned depends on the deployment's compute platform:

  • EC2/On-premises: Information about EC2 instance targets.

  • AWS Lambda: Information about Lambda functions targets.

  • Amazon ECS: Information about Amazon ECS service targets.

" + "documentation":"

Returns an array of one or more targets associated with a deployment. This method works with all compute types and should be used instead of the deprecated BatchGetDeploymentInstances. The maximum number of targets that can be returned is 25.

The type of targets returned depends on the deployment's compute platform or deployment method:

  • EC2/On-premises: Information about EC2 instance targets.

  • AWS Lambda: Information about Lambda functions targets.

  • Amazon ECS: Information about Amazon ECS service targets.

  • CloudFormation: Information about targets of blue/green deployments initiated by a CloudFormation stack update.

" }, "BatchGetDeployments":{ "name":"BatchGetDeployments", @@ -223,7 +224,8 @@ {"shape":"ThrottlingException"}, {"shape":"InvalidUpdateOutdatedInstancesOnlyValueException"}, {"shape":"InvalidIgnoreApplicationStopFailuresValueException"}, - {"shape":"InvalidGitHubAccountTokenException"} + {"shape":"InvalidGitHubAccountTokenException"}, + {"shape":"InvalidTrafficRoutingConfigurationException"} ], "documentation":"

Deploys an application revision through the specified deployment group.

" }, @@ -286,7 +288,8 @@ {"shape":"InvalidECSServiceException"}, {"shape":"InvalidTargetGroupPairException"}, {"shape":"ECSServiceMappingLimitExceededException"}, - {"shape":"InvalidTagsToAddException"} + {"shape":"InvalidTagsToAddException"}, + {"shape":"InvalidTrafficRoutingConfigurationException"} ], "documentation":"

Creates a deployment group to which application revisions are deployed.

" }, @@ -353,6 +356,17 @@ ], "documentation":"

Deletes a GitHub account connection.

" }, + "DeleteResourcesByExternalId":{ + "name":"DeleteResourcesByExternalId", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcesByExternalIdInput"}, + "output":{"shape":"DeleteResourcesByExternalIdOutput"}, + "errors":[], + "documentation":"

Deletes resources linked to an external ID.

" + }, "DeregisterOnPremisesInstance":{ "name":"DeregisterOnPremisesInstance", "http":{ @@ -589,7 +603,7 @@ {"shape":"InvalidTargetFilterNameException"}, {"shape":"InvalidComputePlatformException"} ], - "documentation":"

The newer BatchGetDeploymentTargets should be used instead because it works with all compute types. ListDeploymentInstances throws an exception if it is used with a compute platform other than EC2/On-premises or AWS Lambda.

Lists the instance for a deployment associated with the IAM user or AWS account.

", + "documentation":"

The newer BatchGetDeploymentTargets should be used instead because it works with all compute types. ListDeploymentInstances throws an exception if it is used with a compute platform other than EC2/On-premises or AWS Lambda.

Lists the instances for a deployment associated with the IAM user or AWS account.

", "deprecated":true, "deprecatedMessage":"This operation is deprecated, use ListDeploymentTargets instead." }, @@ -630,7 +644,9 @@ {"shape":"DeploymentGroupNameRequiredException"}, {"shape":"InvalidTimeRangeException"}, {"shape":"InvalidDeploymentStatusException"}, - {"shape":"InvalidNextTokenException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidExternalIdException"}, + {"shape":"InvalidInputException"} ], "documentation":"

Lists the deployments in a deployment group for an application registered with the IAM user or AWS account.

" }, @@ -677,7 +693,7 @@ {"shape":"InvalidArnException"}, {"shape":"ResourceArnRequiredException"} ], - "documentation":"

Returns a list of tags for the resource identified by a specified ARN. Tags are used to organize and categorize your CodeDeploy resources.

" + "documentation":"

Returns a list of tags for the resource identified by a specified Amazon Resource Name (ARN). Tags are used to organize and categorize your CodeDeploy resources.

" }, "PutLifecycleEventHookExecutionStatus":{ "name":"PutLifecycleEventHookExecutionStatus", @@ -696,7 +712,7 @@ {"shape":"InvalidDeploymentIdException"}, {"shape":"UnsupportedActionForDeploymentTypeException"} ], - "documentation":"

Sets the result of a Lambda validation function. The function validates one or both lifecycle events (BeforeAllowTraffic and AfterAllowTraffic) and returns Succeeded or Failed.

" + "documentation":"

Sets the result of a Lambda validation function. The function validates lifecycle hooks during a deployment that uses the AWS Lambda or Amazon ECS compute platform. For AWS Lambda deployments, the available lifecycle hooks are BeforeAllowTraffic and AfterAllowTraffic. For Amazon ECS deployments, the available lifecycle hooks are BeforeInstall, AfterInstall, AfterAllowTestTraffic, BeforeAllowTraffic, and AfterAllowTraffic. Lambda validation functions return Succeeded or Failed. For more information, see AppSpec 'hooks' Section for an AWS Lambda Deployment and AppSpec 'hooks' Section for an Amazon ECS Deployment.

" }, "RegisterApplicationRevision":{ "name":"RegisterApplicationRevision", @@ -786,7 +802,8 @@ {"shape":"DeploymentDoesNotExistException"}, {"shape":"DeploymentGroupDoesNotExistException"}, {"shape":"DeploymentAlreadyCompletedException"}, - {"shape":"InvalidDeploymentIdException"} + {"shape":"InvalidDeploymentIdException"}, + {"shape":"UnsupportedActionForDeploymentTypeException"} ], "documentation":"

Attempts to stop an ongoing deployment.

" }, @@ -828,7 +845,7 @@ {"shape":"ArnNotSupportedException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Disassociates a resource from a list of tags. The resource is identified by the ResourceArn input parameter. The tags are identfied by the list of keys in the TagKeys input parameter.

" + "documentation":"

Disassociates a resource from a list of tags. The resource is identified by the ResourceArn input parameter. The tags are identified by the list of keys in the TagKeys input parameter.

" }, "UpdateApplication":{ "name":"UpdateApplication", @@ -883,7 +900,8 @@ {"shape":"ThrottlingException"}, {"shape":"InvalidECSServiceException"}, {"shape":"InvalidTargetGroupPairException"}, - {"shape":"ECSServiceMappingLimitExceededException"} + {"shape":"ECSServiceMappingLimitExceededException"}, + {"shape":"InvalidTrafficRoutingConfigurationException"} ], "documentation":"

Changes information about a deployment group.

" } @@ -931,7 +949,7 @@ }, "ignorePollAlarmFailure":{ "shape":"Boolean", - "documentation":"

Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.

  • true: The deployment proceeds even if alarm status information can't be retrieved from Amazon CloudWatch.

  • false: The deployment stops if alarm status information can't be retrieved from Amazon CloudWatch.

" + "documentation":"

Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.

  • true: The deployment proceeds even if alarm status information can't be retrieved from Amazon CloudWatch.

  • false: The deployment stops if alarm status information can't be retrieved from Amazon CloudWatch.

" }, "alarms":{ "shape":"AlarmList", @@ -1124,7 +1142,7 @@ "documentation":"

An array of RevisionLocation objects that specify information to get about the application revisions, including type and location. The maximum number of RevisionLocation objects you can specify is 25.

" } }, - "documentation":"

Represents the input of a BatchGetApplicationRevisions operation.

" + "documentation":"

Represents the input of a BatchGetApplicationRevisions operation.

" }, "BatchGetApplicationRevisionsOutput":{ "type":"structure", @@ -1142,7 +1160,7 @@ "documentation":"

Additional information about the revisions, including the type and location.

" } }, - "documentation":"

Represents the output of a BatchGetApplicationRevisions operation.

" + "documentation":"

Represents the output of a BatchGetApplicationRevisions operation.

" }, "BatchGetApplicationsInput":{ "type":"structure", @@ -1150,10 +1168,10 @@ "members":{ "applicationNames":{ "shape":"ApplicationsList", - "documentation":"

A list of application names separated by spaces. The maximum number of application names you can specify is 25.

" + "documentation":"

A list of application names separated by spaces. The maximum number of application names you can specify is 100.

" } }, - "documentation":"

Represents the input of a BatchGetApplications operation.

" + "documentation":"

Represents the input of a BatchGetApplications operation.

" }, "BatchGetApplicationsOutput":{ "type":"structure", @@ -1163,7 +1181,7 @@ "documentation":"

Information about the applications.

" } }, - "documentation":"

Represents the output of a BatchGetApplications operation.

" + "documentation":"

Represents the output of a BatchGetApplications operation.

" }, "BatchGetDeploymentGroupsInput":{ "type":"structure", @@ -1181,7 +1199,7 @@ "documentation":"

The names of the deployment groups.

" } }, - "documentation":"

Represents the input of a BatchGetDeploymentGroups operation.

" + "documentation":"

Represents the input of a BatchGetDeploymentGroups operation.

" }, "BatchGetDeploymentGroupsOutput":{ "type":"structure", @@ -1195,7 +1213,7 @@ "documentation":"

Information about errors that might have occurred during the API call.

" } }, - "documentation":"

Represents the output of a BatchGetDeploymentGroups operation.

" + "documentation":"

Represents the output of a BatchGetDeploymentGroups operation.

" }, "BatchGetDeploymentInstancesInput":{ "type":"structure", @@ -1213,7 +1231,7 @@ "documentation":"

The unique IDs of instances used in the deployment. The maximum number of instance IDs you can specify is 25.

" } }, - "documentation":"

Represents the input of a BatchGetDeploymentInstances operation.

" + "documentation":"

Represents the input of a BatchGetDeploymentInstances operation.

" }, "BatchGetDeploymentInstancesOutput":{ "type":"structure", @@ -1227,7 +1245,7 @@ "documentation":"

Information about errors that might have occurred during the API call.

" } }, - "documentation":"

Represents the output of a BatchGetDeploymentInstances operation.

" + "documentation":"

Represents the output of a BatchGetDeploymentInstances operation.

" }, "BatchGetDeploymentTargetsInput":{ "type":"structure", @@ -1238,7 +1256,7 @@ }, "targetIds":{ "shape":"TargetIdList", - "documentation":"

The unique IDs of the deployment targets. The compute platform of the deployment determines the type of the targets and their formats. The maximum number of deployment target IDs you can specify is 25.

  • For deployments that use the EC2/On-premises compute platform, the target IDs are EC2 or on-premises instances IDs, and their target type is instanceTarget.

  • For deployments that use the AWS Lambda compute platform, the target IDs are the names of Lambda functions, and their target type is instanceTarget.

  • For deployments that use the Amazon ECS compute platform, the target IDs are pairs of Amazon ECS clusters and services specified using the format <clustername>:<servicename>. Their target type is ecsTarget.

" + "documentation":"

The unique IDs of the deployment targets. The compute platform of the deployment determines the type of the targets and their formats. The maximum number of deployment target IDs you can specify is 25.

  • For deployments that use the EC2/On-premises compute platform, the target IDs are EC2 or on-premises instance IDs, and their target type is instanceTarget.

  • For deployments that use the AWS Lambda compute platform, the target IDs are the names of Lambda functions, and their target type is instanceTarget.

  • For deployments that use the Amazon ECS compute platform, the target IDs are pairs of Amazon ECS clusters and services specified using the format <clustername>:<servicename>. Their target type is ecsTarget.

  • For deployments that are deployed with AWS CloudFormation, the target IDs are CloudFormation stack IDs. Their target type is cloudFormationTarget.

" } } }, @@ -1247,7 +1265,7 @@ "members":{ "deploymentTargets":{ "shape":"DeploymentTargetList", - "documentation":"

A list of target objects for a deployment. Each target object contains details about the target, such as its status and lifecycle events. The type of the target objects depends on the deployment' compute platform.

  • EC2/On-premises: Each target object is an EC2 or on-premises instance.

  • AWS Lambda: The target object is a specific version of an AWS Lambda function.

  • Amazon ECS: The target object is an Amazon ECS service.

" + "documentation":"

A list of target objects for a deployment. Each target object contains details about the target, such as its status and lifecycle events. The type of the target objects depends on the deployment's compute platform.

  • EC2/On-premises: Each target object is an EC2 or on-premises instance.

  • AWS Lambda: The target object is a specific version of an AWS Lambda function.

  • Amazon ECS: The target object is an Amazon ECS service.

  • CloudFormation: The target object is an AWS CloudFormation blue/green deployment.

" } } }, @@ -1260,7 +1278,7 @@ "documentation":"

A list of deployment IDs, separated by spaces. The maximum number of deployment IDs you can specify is 25.

" } }, - "documentation":"

Represents the input of a BatchGetDeployments operation.

" + "documentation":"

Represents the input of a BatchGetDeployments operation.

" }, "BatchGetDeploymentsOutput":{ "type":"structure", @@ -1270,7 +1288,7 @@ "documentation":"

Information about the deployments.

" } }, - "documentation":"

Represents the output of a BatchGetDeployments operation.

" + "documentation":"

Represents the output of a BatchGetDeployments operation.

" }, "BatchGetOnPremisesInstancesInput":{ "type":"structure", @@ -1281,7 +1299,7 @@ "documentation":"

The names of the on-premises instances about which to get information. The maximum number of instance names you can specify is 25.

" } }, - "documentation":"

Represents the input of a BatchGetOnPremisesInstances operation.

" + "documentation":"

Represents the input of a BatchGetOnPremisesInstances operation.

" }, "BatchGetOnPremisesInstancesOutput":{ "type":"structure", @@ -1291,7 +1309,7 @@ "documentation":"

Information about the on-premises instances.

" } }, - "documentation":"

Represents the output of a BatchGetOnPremisesInstances operation.

" + "documentation":"

Represents the output of a BatchGetOnPremisesInstances operation.

" }, "BatchLimitExceededException":{ "type":"structure", @@ -1323,7 +1341,7 @@ "members":{ "action":{ "shape":"InstanceAction", - "documentation":"

The action to take on instances in the original environment after a successful blue/green deployment.

  • TERMINATE: Instances are terminated after a specified wait time.

  • KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.

" + "documentation":"

The action to take on instances in the original environment after a successful blue/green deployment.

  • TERMINATE: Instances are terminated after a specified wait time.

  • KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.

" }, "terminationWaitTimeInMinutes":{ "shape":"Duration", @@ -1350,6 +1368,41 @@ "JSON" ] }, + "CloudFormationResourceType":{"type":"string"}, + "CloudFormationTarget":{ + "type":"structure", + "members":{ + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The unique ID of an AWS CloudFormation blue/green deployment.

" + }, + "targetId":{ + "shape":"TargetId", + "documentation":"

The unique ID of a deployment target that has a type of CloudFormationTarget.

" + }, + "lastUpdatedAt":{ + "shape":"Time", + "documentation":"

The date and time when the target application was updated by an AWS CloudFormation blue/green deployment.

" + }, + "lifecycleEvents":{ + "shape":"LifecycleEventList", + "documentation":"

The lifecycle events of the AWS CloudFormation blue/green deployment to this target application.

" + }, + "status":{ + "shape":"TargetStatus", + "documentation":"

The status of an AWS CloudFormation blue/green deployment's target application.

" + }, + "resourceType":{ + "shape":"CloudFormationResourceType", + "documentation":"

The resource type for the AWS CloudFormation blue/green deployment.

" + }, + "targetVersionWeight":{ + "shape":"TrafficWeight", + "documentation":"

The percentage of production traffic that the target version of an AWS CloudFormation blue/green deployment receives.

" + } + }, + "documentation":"

Information about the target to be updated by an AWS CloudFormation blue/green deployment. This target type is used for all deployments initiated by a CloudFormation stack update.

" + }, "CommitId":{"type":"string"}, "ComputePlatform":{ "type":"string", @@ -1368,7 +1421,7 @@ }, "deploymentWaitType":{ "shape":"DeploymentWaitType", - "documentation":"

The status of the deployment's waiting period. READY_WAIT indicates the deployment is ready to start shifting traffic. TERMINATION_WAIT indicates the traffic is shifted, but the original target is not terminated.

" + "documentation":"

The status of the deployment's waiting period. READY_WAIT indicates that the deployment is ready to start shifting traffic. TERMINATION_WAIT indicates that the traffic is shifted, but the original target is not terminated.

" } } }, @@ -1389,7 +1442,7 @@ "documentation":"

The metadata that you apply to CodeDeploy applications to help you organize and categorize them. Each tag consists of a key and an optional value, both of which you define.

" } }, - "documentation":"

Represents the input of a CreateApplication operation.

" + "documentation":"

Represents the input of a CreateApplication operation.

" }, "CreateApplicationOutput":{ "type":"structure", @@ -1399,7 +1452,7 @@ "documentation":"

A unique application ID.

" } }, - "documentation":"

Represents the output of a CreateApplication operation.

" + "documentation":"

Represents the output of a CreateApplication operation.

" }, "CreateDeploymentConfigInput":{ "type":"structure", @@ -1411,7 +1464,7 @@ }, "minimumHealthyHosts":{ "shape":"MinimumHealthyHosts", - "documentation":"

The minimum number of healthy instances that should be available at any time during the deployment. There are two parameters expected in the input: type and value.

The type parameter takes either of the following values:

  • HOST_COUNT: The value parameter represents the minimum number of healthy instances as an absolute value.

  • FLEET_PERCENT: The value parameter represents the minimum number of healthy instances as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy converts the percentage to the equivalent number of instance and rounds up fractional instances.

The value parameter takes an integer.

For example, to set a minimum of 95% healthy instance, specify a type of FLEET_PERCENT and a value of 95.

" + "documentation":"

The minimum number of healthy instances that should be available at any time during the deployment. There are two parameters expected in the input: type and value.

The type parameter takes either of the following values:

  • HOST_COUNT: The value parameter represents the minimum number of healthy instances as an absolute value.

  • FLEET_PERCENT: The value parameter represents the minimum number of healthy instances as a percentage of the total number of instances in the deployment. If you specify FLEET_PERCENT, at the start of the deployment, AWS CodeDeploy converts the percentage to the equivalent number of instances and rounds up fractional instances.

The value parameter takes an integer.

For example, to set a minimum of 95% healthy instances, specify a type of FLEET_PERCENT and a value of 95.

" }, "trafficRoutingConfig":{ "shape":"TrafficRoutingConfig", @@ -1422,7 +1475,7 @@ "documentation":"

The destination platform type for the deployment (Lambda, Server, or ECS).

" } }, - "documentation":"

Represents the input of a CreateDeploymentConfig operation.

" + "documentation":"

Represents the input of a CreateDeploymentConfig operation.

" }, "CreateDeploymentConfigOutput":{ "type":"structure", @@ -1432,7 +1485,7 @@ "documentation":"

A unique deployment configuration ID.

" } }, - "documentation":"

Represents the output of a CreateDeploymentConfig operation.

" + "documentation":"

Represents the output of a CreateDeploymentConfig operation.

" }, "CreateDeploymentGroupInput":{ "type":"structure", @@ -1452,7 +1505,7 @@ }, "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.

CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or deployment group.

For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy in the AWS CodeDeploy User Guide.

" + "documentation":"

If specified, the deployment configuration name can be either one of the predefined configurations provided with AWS CodeDeploy or a custom deployment configuration that you create by calling the create deployment configuration operation.

CodeDeployDefault.OneAtATime is the default deployment configuration. It is used if a configuration isn't specified for the deployment or deployment group.

For more information about the predefined deployment configurations in AWS CodeDeploy, see Working with Deployment Configurations in CodeDeploy in the AWS CodeDeploy User Guide.

" }, "ec2TagFilters":{ "shape":"EC2TagFilterList", @@ -1460,7 +1513,7 @@ }, "onPremisesInstanceTagFilters":{ "shape":"TagFilterList", - "documentation":"

The on-premises instance tags on which to filter. The deployment group includes on-premises instances with any of the specified tags. Cannot be used in the same call as OnPremisesTagSet.

" + "documentation":"

The on-premises instance tags on which to filter. The deployment group includes on-premises instances with any of the specified tags. Cannot be used in the same call as OnPremisesTagSet.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupNameList", @@ -1468,11 +1521,11 @@ }, "serviceRoleArn":{ "shape":"Role", - "documentation":"

A service role ARN that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.

" + "documentation":"

A service role Amazon Resource Name (ARN) that allows AWS CodeDeploy to act on the user's behalf when interacting with AWS services.

" }, "triggerConfigurations":{ "shape":"TriggerConfigList", - "documentation":"

Information about triggers to create when the deployment group is created. For examples, see Create a Trigger for an AWS CodeDeploy Event in the AWS CodeDeploy User Guide.

" + "documentation":"

Information about triggers to create when the deployment group is created. For examples, see Create a Trigger for an AWS CodeDeploy Event in the AWS CodeDeploy User Guide.

" }, "alarmConfiguration":{ "shape":"AlarmConfiguration", @@ -1496,7 +1549,7 @@ }, "ec2TagSet":{ "shape":"EC2TagSet", - "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group includes only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" + "documentation":"

Information about groups of tags applied to EC2 instances. The deployment group includes only EC2 instances identified by all the tag groups. Cannot be used in the same call as ec2TagFilters.

" }, "ecsServices":{ "shape":"ECSServiceList", @@ -1504,14 +1557,14 @@ }, "onPremisesTagSet":{ "shape":"OnPremisesTagSet", - "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group includes only on-premises instances identified by all of the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" + "documentation":"

Information about groups of tags applied to on-premises instances. The deployment group includes only on-premises instances identified by all of the tag groups. Cannot be used in the same call as onPremisesInstanceTagFilters.

" }, "tags":{ "shape":"TagList", "documentation":"

The metadata that you apply to CodeDeploy deployment groups to help you organize and categorize them. Each tag consists of a key and an optional value, both of which you define.

" } }, - "documentation":"

Represents the input of a CreateDeploymentGroup operation.

" + "documentation":"

Represents the input of a CreateDeploymentGroup operation.

" }, "CreateDeploymentGroupOutput":{ "type":"structure", @@ -1521,7 +1574,7 @@ "documentation":"

A unique deployment group ID.

" } }, - "documentation":"

Represents the output of a CreateDeploymentGroup operation.

" + "documentation":"

Represents the output of a CreateDeploymentGroup operation.

" }, "CreateDeploymentInput":{ "type":"structure", @@ -1541,7 +1594,7 @@ }, "deploymentConfigName":{ "shape":"DeploymentConfigName", - "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

If not specified, the value configured in the deployment group is used as the default. If the deployment group does not have a deployment configuration associated with it, CodeDeployDefault.OneAtATime is used by default.

" + "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

If not specified, the value configured in the deployment group is used as the default. If the deployment group does not have a deployment configuration associated with it, CodeDeployDefault.OneAtATime is used by default.

" }, "description":{ "shape":"Description", @@ -1549,7 +1602,7 @@ }, "ignoreApplicationStopFailures":{ "shape":"Boolean", - "documentation":"

If true, then if an ApplicationStop, BeforeBlockTraffic, or AfterBlockTraffic deployment lifecycle event to an instance fails, then the deployment continues to the next deployment lifecycle event. For example, if ApplicationStop fails, the deployment continues with DownloadBundle. If BeforeBlockTraffic fails, the deployment continues with BlockTraffic. If AfterBlockTraffic fails, the deployment continues with ApplicationStop.

If false or not specified, then if a lifecycle event fails during a deployment to an instance, that deployment fails. If deployment to that instance is part of an overall deployment and the number of healthy hosts is not less than the minimum number of healthy hosts, then a deployment to the next instance is attempted.

During a deployment, the AWS CodeDeploy agent runs the scripts specified for ApplicationStop, BeforeBlockTraffic, and AfterBlockTraffic in the AppSpec file from the previous successful deployment. (All other scripts are run from the AppSpec file in the current deployment.) If one of these scripts contains an error and does not run successfully, the deployment can fail.

If the cause of the failure is a script from the last successful deployment that will never run successfully, create a new deployment and use ignoreApplicationStopFailures to specify that the ApplicationStop, BeforeBlockTraffic, and AfterBlockTraffic failures should be ignored.

" + "documentation":"

If true, then if an ApplicationStop, BeforeBlockTraffic, or AfterBlockTraffic deployment lifecycle event to an instance fails, then the deployment continues to the next deployment lifecycle event. For example, if ApplicationStop fails, the deployment continues with DownloadBundle. If BeforeBlockTraffic fails, the deployment continues with BlockTraffic. If AfterBlockTraffic fails, the deployment continues with ApplicationStop.

If false or not specified, then if a lifecycle event fails during a deployment to an instance, that deployment fails. If deployment to that instance is part of an overall deployment and the number of healthy hosts is not less than the minimum number of healthy hosts, then a deployment to the next instance is attempted.

During a deployment, the AWS CodeDeploy agent runs the scripts specified for ApplicationStop, BeforeBlockTraffic, and AfterBlockTraffic in the AppSpec file from the previous successful deployment. (All other scripts are run from the AppSpec file in the current deployment.) If one of these scripts contains an error and does not run successfully, the deployment can fail.

If the cause of the failure is a script from the last successful deployment that will never run successfully, create a new deployment and use ignoreApplicationStopFailures to specify that the ApplicationStop, BeforeBlockTraffic, and AfterBlockTraffic failures should be ignored.

" }, "targetInstances":{ "shape":"TargetInstances", @@ -1565,10 +1618,10 @@ }, "fileExistsBehavior":{ "shape":"FileExistsBehavior", - "documentation":"

Information about how AWS CodeDeploy handles files that already exist in a deployment target location but weren't part of the previous successful deployment.

The fileExistsBehavior parameter takes any of the following values:

  • DISALLOW: The deployment fails. This is also the default behavior if no option is specified.

  • OVERWRITE: The version of the file from the application revision currently being deployed replaces the version already on the instance.

  • RETAIN: The version of the file already on the instance is kept and used as part of the new deployment.

" + "documentation":"

Information about how AWS CodeDeploy handles files that already exist in a deployment target location but weren't part of the previous successful deployment.

The fileExistsBehavior parameter takes any of the following values:

  • DISALLOW: The deployment fails. This is also the default behavior if no option is specified.

  • OVERWRITE: The version of the file from the application revision currently being deployed replaces the version already on the instance.

  • RETAIN: The version of the file already on the instance is kept and used as part of the new deployment.

" } }, - "documentation":"

Represents the input of a CreateDeployment operation.

" + "documentation":"

Represents the input of a CreateDeployment operation.

" }, "CreateDeploymentOutput":{ "type":"structure", @@ -1578,7 +1631,7 @@ "documentation":"

The unique ID of a deployment.

" } }, - "documentation":"

Represents the output of a CreateDeployment operation.

" + "documentation":"

Represents the output of a CreateDeployment operation.

" }, "DeleteApplicationInput":{ "type":"structure", @@ -1589,7 +1642,7 @@ "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" } }, - "documentation":"

Represents the input of a DeleteApplication operation.

" + "documentation":"

Represents the input of a DeleteApplication operation.

" }, "DeleteDeploymentConfigInput":{ "type":"structure", @@ -1600,7 +1653,7 @@ "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

" } }, - "documentation":"

Represents the input of a DeleteDeploymentConfig operation.

" + "documentation":"

Represents the input of a DeleteDeploymentConfig operation.

" }, "DeleteDeploymentGroupInput":{ "type":"structure", @@ -1618,7 +1671,7 @@ "documentation":"

The name of a deployment group for the specified application.

" } }, - "documentation":"

Represents the input of a DeleteDeploymentGroup operation.

" + "documentation":"

Represents the input of a DeleteDeploymentGroup operation.

" }, "DeleteDeploymentGroupOutput":{ "type":"structure", @@ -1628,7 +1681,7 @@ "documentation":"

If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the Amazon EC2 instances in the Auto Scaling group.

" } }, - "documentation":"

Represents the output of a DeleteDeploymentGroup operation.

" + "documentation":"

Represents the output of a DeleteDeploymentGroup operation.

" }, "DeleteGitHubAccountTokenInput":{ "type":"structure", @@ -1638,7 +1691,7 @@ "documentation":"

The name of the GitHub account connection to delete.

" } }, - "documentation":"

Represents the input of a DeleteGitHubAccount operation.

" + "documentation":"

Represents the input of a DeleteGitHubAccountToken operation.

" }, "DeleteGitHubAccountTokenOutput":{ "type":"structure", @@ -1648,7 +1701,21 @@ "documentation":"

The name of the GitHub account connection that was deleted.

" } }, - "documentation":"

Represents the output of a DeleteGitHubAccountToken operation.

" + "documentation":"

Represents the output of a DeleteGitHubAccountToken operation.

" + }, + "DeleteResourcesByExternalIdInput":{ + "type":"structure", + "members":{ + "externalId":{ + "shape":"ExternalId", + "documentation":"

The unique ID of an external resource (for example, a CloudFormation stack ID) that is linked to one or more CodeDeploy resources.

" + } + } + }, + "DeleteResourcesByExternalIdOutput":{ + "type":"structure", + "members":{ + } }, "DeploymentAlreadyCompletedException":{ "type":"structure", @@ -1668,7 +1735,7 @@ "type":"structure", "members":{ }, - "documentation":"

A deployment configuration with the specified name with the IAM user or AWS account already exists .

", + "documentation":"

A deployment configuration with the specified name with the IAM user or AWS account already exists.

", "exception":true }, "DeploymentConfigDoesNotExistException":{ @@ -1711,7 +1778,7 @@ }, "trafficRoutingConfig":{ "shape":"TrafficRoutingConfig", - "documentation":"

The configuration that specifies how the deployment traffic is routed. Only deployments with a Lambda compute platform can specify this.

" + "documentation":"

The configuration that specifies how the deployment traffic is routed. Used for deployments with a Lambda or ECS compute platform only.

" } }, "documentation":"

Information about a deployment configuration.

" @@ -1744,7 +1811,10 @@ "enum":[ "user", "autoscaling", - "codeDeployRollback" + "codeDeployRollback", + "CodeDeploy", + "CloudFormation", + "CloudFormationRollback" ] }, "DeploymentDoesNotExistException":{ @@ -1951,7 +2021,7 @@ }, "creator":{ "shape":"DeploymentCreator", - "documentation":"

The means by which the deployment was created:

  • user: A user created the deployment.

  • autoscaling: Amazon EC2 Auto Scaling created the deployment.

  • codeDeployRollback: A rollback process created the deployment.

" + "documentation":"

The means by which the deployment was created:

  • user: A user created the deployment.

  • autoscaling: Amazon EC2 Auto Scaling created the deployment.

  • codeDeployRollback: A rollback process created the deployment.

" }, "ignoreApplicationStopFailures":{ "shape":"Boolean", @@ -1995,7 +2065,7 @@ }, "fileExistsBehavior":{ "shape":"FileExistsBehavior", - "documentation":"

Information about how AWS CodeDeploy handles files that already exist in a deployment target location but weren't part of the previous successful deployment.

  • DISALLOW: The deployment fails. This is also the default behavior if no option is specified.

  • OVERWRITE: The version of the file from the application revision currently being deployed replaces the version already on the instance.

  • RETAIN: The version of the file already on the instance is kept and used as part of the new deployment.

" + "documentation":"

Information about how AWS CodeDeploy handles files that already exist in a deployment target location but weren't part of the previous successful deployment.

  • DISALLOW: The deployment fails. This is also the default behavior if no option is specified.

  • OVERWRITE: The version of the file from the application revision currently being deployed replaces the version already on the instance.

  • RETAIN: The version of the file already on the instance is kept and used as part of the new deployment.

" }, "deploymentStatusMessages":{ "shape":"DeploymentStatusMessageList", @@ -2004,6 +2074,10 @@ "computePlatform":{ "shape":"ComputePlatform", "documentation":"

The destination platform type for the deployment (Lambda, Server, or ECS).

" + }, + "externalId":{ + "shape":"ExternalId", + "documentation":"

The unique ID for an external resource (for example, a CloudFormation stack ID) that is linked to this deployment.

" } }, "documentation":"

Information about a deployment.

" @@ -2082,7 +2156,7 @@ }, "waitTimeInMinutes":{ "shape":"Duration", - "documentation":"

The number of minutes to wait before the status of a blue/green deployment is changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout

" + "documentation":"

The number of minutes to wait before the status of a blue/green deployment is changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout.

" } }, "documentation":"

Information about how traffic is rerouted to instances in a replacement environment in a blue/green deployment.

" @@ -2093,6 +2167,7 @@ "Created", "Queued", "InProgress", + "Baking", "Succeeded", "Failed", "Stopped", @@ -2126,7 +2201,7 @@ "members":{ "deploymentTargetType":{ "shape":"DeploymentTargetType", - "documentation":"

The deployment type that is specific to the deployment's compute platform.

" + "documentation":"

The deployment type that is specific to the deployment's compute platform or deployments initiated by a CloudFormation stack update.

" }, "instanceTarget":{ "shape":"InstanceTarget", @@ -2139,7 +2214,8 @@ "ecsTarget":{ "shape":"ECSTarget", "documentation":"

Information about the target for a deployment that uses the Amazon ECS compute platform.

" - } + }, + "cloudFormationTarget":{"shape":"CloudFormationTarget"} }, "documentation":"

Information about the deployment target.

" }, @@ -2173,7 +2249,8 @@ "enum":[ "InstanceTarget", "LambdaTarget", - "ECSTarget" + "ECSTarget", + "CloudFormationTarget" ] }, "DeploymentType":{ @@ -2207,7 +2284,7 @@ "documentation":"

The name of the on-premises instance to deregister.

" } }, - "documentation":"

Represents the input of a DeregisterOnPremisesInstance operation.

" + "documentation":"

Represents the input of a DeregisterOnPremisesInstance operation.

" }, "Description":{"type":"string"}, "DescriptionTooLongException":{ @@ -2253,7 +2330,7 @@ }, "Type":{ "shape":"EC2TagFilterType", - "documentation":"

The tag filter type:

  • KEY_ONLY: Key only.

  • VALUE_ONLY: Value only.

  • KEY_AND_VALUE: Key and value.

" + "documentation":"

The tag filter type:

  • KEY_ONLY: Key only.

  • VALUE_ONLY: Value only.

  • KEY_AND_VALUE: Key and value.

" } }, "documentation":"

Information about an EC2 tag filter.

" @@ -2324,7 +2401,7 @@ }, "targetArn":{ "shape":"TargetArn", - "documentation":"

The ARN of the target.

" + "documentation":"

The Amazon Resource Name (ARN) of the target.

" }, "lastUpdatedAt":{ "shape":"Time", @@ -2441,7 +2518,8 @@ "RESOURCE_LIMIT_EXCEEDED", "REVISION_MISSING", "THROTTLED", - "TIMEOUT" + "TIMEOUT", + "CLOUDFORMATION_STACK_FAILURE" ] }, "ErrorInformation":{ @@ -2459,6 +2537,7 @@ "documentation":"

Information about a deployment error.

" }, "ErrorMessage":{"type":"string"}, + "ExternalId":{"type":"string"}, "FileExistsBehavior":{ "type":"string", "enum":[ @@ -2507,7 +2586,7 @@ "documentation":"

The name of an AWS CodeDeploy application associated with the IAM user or AWS account.

" } }, - "documentation":"

Represents the input of a GetApplication operation.

" + "documentation":"

Represents the input of a GetApplication operation.

" }, "GetApplicationOutput":{ "type":"structure", @@ -2517,7 +2596,7 @@ "documentation":"

Information about the application.

" } }, - "documentation":"

Represents the output of a GetApplication operation.

" + "documentation":"

Represents the output of a GetApplication operation.

" }, "GetApplicationRevisionInput":{ "type":"structure", @@ -2535,7 +2614,7 @@ "documentation":"

Information about the application revision to get, including type and location.

" } }, - "documentation":"

Represents the input of a GetApplicationRevision operation.

" + "documentation":"

Represents the input of a GetApplicationRevision operation.

" }, "GetApplicationRevisionOutput":{ "type":"structure", @@ -2553,7 +2632,7 @@ "documentation":"

General information about the revision.

" } }, - "documentation":"

Represents the output of a GetApplicationRevision operation.

" + "documentation":"

Represents the output of a GetApplicationRevision operation.

" }, "GetDeploymentConfigInput":{ "type":"structure", @@ -2564,7 +2643,7 @@ "documentation":"

The name of a deployment configuration associated with the IAM user or AWS account.

" } }, - "documentation":"

Represents the input of a GetDeploymentConfig operation.

" + "documentation":"

Represents the input of a GetDeploymentConfig operation.

" }, "GetDeploymentConfigOutput":{ "type":"structure", @@ -2574,7 +2653,7 @@ "documentation":"

Information about the deployment configuration.

" } }, - "documentation":"

Represents the output of a GetDeploymentConfig operation.

" + "documentation":"

Represents the output of a GetDeploymentConfig operation.

" }, "GetDeploymentGroupInput":{ "type":"structure", @@ -2592,7 +2671,7 @@ "documentation":"

The name of a deployment group for the specified application.

" } }, - "documentation":"

Represents the input of a GetDeploymentGroup operation.

" + "documentation":"

Represents the input of a GetDeploymentGroup operation.

" }, "GetDeploymentGroupOutput":{ "type":"structure", @@ -2602,7 +2681,7 @@ "documentation":"

Information about the deployment group.

" } }, - "documentation":"

Represents the output of a GetDeploymentGroup operation.

" + "documentation":"

Represents the output of a GetDeploymentGroup operation.

" }, "GetDeploymentInput":{ "type":"structure", @@ -2613,7 +2692,7 @@ "documentation":"

The unique ID of a deployment associated with the IAM user or AWS account.

" } }, - "documentation":"

Represents the input of a GetDeployment operation.

" + "documentation":"

Represents the input of a GetDeployment operation.

" }, "GetDeploymentInstanceInput":{ "type":"structure", @@ -2631,7 +2710,7 @@ "documentation":"

The unique ID of an instance in the deployment group.

" } }, - "documentation":"

Represents the input of a GetDeploymentInstance operation.

" + "documentation":"

Represents the input of a GetDeploymentInstance operation.

" }, "GetDeploymentInstanceOutput":{ "type":"structure", @@ -2641,7 +2720,7 @@ "documentation":"

Information about the instance.

" } }, - "documentation":"

Represents the output of a GetDeploymentInstance operation.

" + "documentation":"

Represents the output of a GetDeploymentInstance operation.

" }, "GetDeploymentOutput":{ "type":"structure", @@ -2651,7 +2730,7 @@ "documentation":"

Information about the deployment.

" } }, - "documentation":"

Represents the output of a GetDeployment operation.

" + "documentation":"

Represents the output of a GetDeployment operation.

" }, "GetDeploymentTargetInput":{ "type":"structure", @@ -2671,7 +2750,7 @@ "members":{ "deploymentTarget":{ "shape":"DeploymentTarget", - "documentation":"

A deployment target that contains information about a deployment such as its status, lifecyle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).

" + "documentation":"

A deployment target that contains information about a deployment such as its status, lifecycle events, and when it was last updated. It also contains metadata about the deployment target. The deployment target metadata depends on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget).

" } } }, @@ -2684,7 +2763,7 @@ "documentation":"

The name of the on-premises instance about which to get information.

" } }, - "documentation":"

Represents the input of a GetOnPremisesInstance operation.

" + "documentation":"

Represents the input of a GetOnPremisesInstance operation.

" }, "GetOnPremisesInstanceOutput":{ "type":"structure", @@ -2694,7 +2773,7 @@ "documentation":"

Information about the on-premises instance.

" } }, - "documentation":"

Represents the output of a GetOnPremisesInstance operation.

" + "documentation":"

Represents the output of a GetOnPremisesInstance operation.

" }, "GitHubAccountTokenDoesNotExistException":{ "type":"structure", @@ -2741,7 +2820,7 @@ "members":{ "action":{ "shape":"GreenFleetProvisioningAction", - "documentation":"

The method used to add instances to a replacement environment.

  • DISCOVER_EXISTING: Use instances that already exist or will be created manually.

  • COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.

" + "documentation":"

The method used to add instances to a replacement environment.

  • DISCOVER_EXISTING: Use instances that already exist or will be created manually.

  • COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.

" } }, "documentation":"

Information about the instances that belong to the replacement environment in a blue/green deployment.

" @@ -2906,11 +2985,11 @@ }, "status":{ "shape":"InstanceStatus", - "documentation":"

The deployment status for this instance:

  • Pending: The deployment is pending for this instance.

  • In Progress: The deployment is in progress for this instance.

  • Succeeded: The deployment has succeeded for this instance.

  • Failed: The deployment has failed for this instance.

  • Skipped: The deployment has been skipped for this instance.

  • Unknown: The deployment status is unknown for this instance.

" + "documentation":"

The deployment status for this instance:

  • Pending: The deployment is pending for this instance.

  • In Progress: The deployment is in progress for this instance.

  • Succeeded: The deployment has succeeded for this instance.

  • Failed: The deployment has failed for this instance.

  • Skipped: The deployment has been skipped for this instance.

  • Unknown: The deployment status is unknown for this instance.

" }, "lastUpdatedAt":{ "shape":"Timestamp", - "documentation":"

A timestamp that indicaties when the instance information was last updated.

" + "documentation":"

A timestamp that indicates when the instance information was last updated.

" }, "lifecycleEvents":{ "shape":"LifecycleEventList", @@ -2942,7 +3021,7 @@ }, "targetArn":{ "shape":"TargetArn", - "documentation":"

The ARN of the target.

" + "documentation":"

The Amazon Resource Name (ARN) of the target.

" }, "status":{ "shape":"TargetStatus", @@ -3031,7 +3110,7 @@ "type":"structure", "members":{ }, - "documentation":"

The computePlatform is invalid. The computePlatform should be Lambda or Server.

", + "documentation":"

The computePlatform is invalid. The computePlatform should be Lambda, Server, or ECS.

", "exception":true }, "InvalidDeployedStateFilterException":{ @@ -3125,6 +3204,13 @@ "documentation":"

The Amazon ECS service identifier is not valid.

", "exception":true }, + "InvalidExternalIdException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The external ID was specified in an invalid format.

", + "exception":true + }, "InvalidFileExistsBehaviorException":{ "type":"structure", "members":{ @@ -3381,7 +3467,7 @@ }, "functionAlias":{ "shape":"LambdaFunctionAlias", - "documentation":"

The alias of a Lambda function. For more information, see Introduction to AWS Lambda Aliases.

" + "documentation":"

The alias of a Lambda function. For more information, see AWS Lambda Function Aliases in the AWS Lambda Developer Guide.

" }, "currentVersion":{ "shape":"Version", @@ -3412,7 +3498,7 @@ }, "targetArn":{ "shape":"TargetArn", - "documentation":"

The ARN of the target.

" + "documentation":"

The Amazon Resource Name (ARN) of the target.

" }, "status":{ "shape":"TargetStatus", @@ -3471,7 +3557,7 @@ "members":{ "lifecycleEventName":{ "shape":"LifecycleEventName", - "documentation":"

The deployment lifecycle event name, such as ApplicationStop, BeforeInstall, AfterInstall, ApplicationStart, or ValidateService.

" + "documentation":"

The deployment lifecycle event name, such as ApplicationStop, BeforeInstall, AfterInstall, ApplicationStart, or ValidateService.

" }, "diagnostics":{ "shape":"Diagnostics", @@ -3534,11 +3620,11 @@ }, "sortBy":{ "shape":"ApplicationRevisionSortBy", - "documentation":"

The column name to use to sort the list results:

  • registerTime: Sort by the time the revisions were registered with AWS CodeDeploy.

  • firstUsedTime: Sort by the time the revisions were first used in a deployment.

  • lastUsedTime: Sort by the time the revisions were last used in a deployment.

If not specified or set to null, the results are returned in an arbitrary order.

" + "documentation":"

The column name to use to sort the list results:

  • registerTime: Sort by the time the revisions were registered with AWS CodeDeploy.

  • firstUsedTime: Sort by the time the revisions were first used in a deployment.

  • lastUsedTime: Sort by the time the revisions were last used in a deployment.

If not specified or set to null, the results are returned in an arbitrary order.

" }, "sortOrder":{ "shape":"SortOrder", - "documentation":"

The order in which to sort the list results:

  • ascending: ascending order.

  • descending: descending order.

If not specified, the results are sorted in ascending order.

If set to null, the results are sorted in an arbitrary order.

" + "documentation":"

The order in which to sort the list results:

  • ascending: ascending order.

  • descending: descending order.

If not specified, the results are sorted in ascending order.

If set to null, the results are sorted in an arbitrary order.

" }, "s3Bucket":{ "shape":"S3Bucket", @@ -3550,14 +3636,14 @@ }, "deployed":{ "shape":"ListStateFilterAction", - "documentation":"

Whether to list revisions based on whether the revision is the target revision of an deployment group:

  • include: List revisions that are target revisions of a deployment group.

  • exclude: Do not list revisions that are target revisions of a deployment group.

  • ignore: List all revisions.

" + "documentation":"

Whether to list revisions based on whether the revision is the target revision of a deployment group:

  • include: List revisions that are target revisions of a deployment group.

  • exclude: Do not list revisions that are target revisions of a deployment group.

  • ignore: List all revisions.

" }, "nextToken":{ "shape":"NextToken", "documentation":"

An identifier returned from the previous ListApplicationRevisions call. It can be used to return the next set of application revisions in the list.

" } }, - "documentation":"

Represents the input of a ListApplicationRevisions operation.

" + "documentation":"

Represents the input of a ListApplicationRevisions operation.

" }, "ListApplicationRevisionsOutput":{ "type":"structure", @@ -3571,7 +3657,7 @@ "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list application revisions call to return the next set of application revisions in the list.

" } }, - "documentation":"

Represents the output of a ListApplicationRevisions operation.

" + "documentation":"

Represents the output of a ListApplicationRevisions operation.

" }, "ListApplicationsInput":{ "type":"structure", @@ -3581,7 +3667,7 @@ "documentation":"

An identifier returned from the previous list applications call. It can be used to return the next set of applications in the list.

" } }, - "documentation":"

Represents the input of a ListApplications operation.

" + "documentation":"

Represents the input of a ListApplications operation.

" }, "ListApplicationsOutput":{ "type":"structure", @@ -3605,21 +3691,21 @@ "documentation":"

An identifier returned from the previous ListDeploymentConfigs call. It can be used to return the next set of deployment configurations in the list.

" } }, - "documentation":"

Represents the input of a ListDeploymentConfigs operation.

" + "documentation":"

Represents the input of a ListDeploymentConfigs operation.

" }, "ListDeploymentConfigsOutput":{ "type":"structure", "members":{ "deploymentConfigsList":{ "shape":"DeploymentConfigsList", - "documentation":"

A list of deployment configurations, including built-in configurations such as CodeDeployDefault.OneAtATime.

" + "documentation":"

A list of deployment configurations, including built-in configurations such as CodeDeployDefault.OneAtATime.

" }, "nextToken":{ "shape":"NextToken", "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment configurations call to return the next set of deployment configurations in the list.

" } }, - "documentation":"

Represents the output of a ListDeploymentConfigs operation.

" + "documentation":"

Represents the output of a ListDeploymentConfigs operation.

" }, "ListDeploymentGroupsInput":{ "type":"structure", @@ -3634,7 +3720,7 @@ "documentation":"

An identifier returned from the previous list deployment groups call. It can be used to return the next set of deployment groups in the list.

" } }, - "documentation":"

Represents the input of a ListDeploymentGroups operation.

" + "documentation":"

Represents the input of a ListDeploymentGroups operation.

" }, "ListDeploymentGroupsOutput":{ "type":"structure", @@ -3652,7 +3738,7 @@ "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment groups call to return the next set of deployment groups in the list.

" } }, - "documentation":"

Represents the output of a ListDeploymentGroups operation.

" + "documentation":"

Represents the output of a ListDeploymentGroups operation.

" }, "ListDeploymentInstancesInput":{ "type":"structure", @@ -3668,14 +3754,14 @@ }, "instanceStatusFilter":{ "shape":"InstanceStatusList", - "documentation":"

A subset of instances to list by status:

  • Pending: Include those instances with pending deployments.

  • InProgress: Include those instances where deployments are still in progress.

  • Succeeded: Include those instances with successful deployments.

  • Failed: Include those instances with failed deployments.

  • Skipped: Include those instances with skipped deployments.

  • Unknown: Include those instances with deployments in an unknown state.

" + "documentation":"

A subset of instances to list by status:

  • Pending: Include those instances with pending deployments.

  • InProgress: Include those instances where deployments are still in progress.

  • Succeeded: Include those instances with successful deployments.

  • Failed: Include those instances with failed deployments.

  • Skipped: Include those instances with skipped deployments.

  • Unknown: Include those instances with deployments in an unknown state.

" }, "instanceTypeFilter":{ "shape":"InstanceTypeList", "documentation":"

The set of instances in a blue/green deployment, either those in the original environment (\"BLUE\") or those in the replacement environment (\"GREEN\"), for which you want to view instance information.

" } }, - "documentation":"

Represents the input of a ListDeploymentInstances operation.

" + "documentation":"

Represents the input of a ListDeploymentInstances operation.

" }, "ListDeploymentInstancesOutput":{ "type":"structure", @@ -3689,7 +3775,7 @@ "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployment instances call to return the next set of deployment instances in the list.

" } }, - "documentation":"

Represents the output of a ListDeploymentInstances operation.

" + "documentation":"

Represents the output of a ListDeploymentInstances operation.

" }, "ListDeploymentTargetsInput":{ "type":"structure", @@ -3732,9 +3818,13 @@ "shape":"DeploymentGroupName", "documentation":"

The name of a deployment group for the specified application.

If deploymentGroupName is specified, then applicationName must be specified. If it is not specified, then applicationName must not be specified.

" }, + "externalId":{ + "shape":"ExternalId", + "documentation":"

The unique ID of an external resource for returning deployments linked to the external resource.

" + }, "includeOnlyStatuses":{ "shape":"DeploymentStatusList", - "documentation":"

A subset of deployments to list by status:

  • Created: Include created deployments in the resulting list.

  • Queued: Include queued deployments in the resulting list.

  • In Progress: Include in-progress deployments in the resulting list.

  • Succeeded: Include successful deployments in the resulting list.

  • Failed: Include failed deployments in the resulting list.

  • Stopped: Include stopped deployments in the resulting list.

" + "documentation":"

A subset of deployments to list by status:

  • Created: Include created deployments in the resulting list.

  • Queued: Include queued deployments in the resulting list.

  • In Progress: Include in-progress deployments in the resulting list.

  • Succeeded: Include successful deployments in the resulting list.

  • Failed: Include failed deployments in the resulting list.

  • Stopped: Include stopped deployments in the resulting list.

" }, "createTimeRange":{ "shape":"TimeRange", @@ -3745,7 +3835,7 @@ "documentation":"

An identifier returned from the previous list deployments call. It can be used to return the next set of deployments in the list.

" } }, - "documentation":"

Represents the input of a ListDeployments operation.

" + "documentation":"

Represents the input of a ListDeployments operation.

" }, "ListDeploymentsOutput":{ "type":"structure", @@ -3759,17 +3849,17 @@ "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent list deployments call to return the next set of deployments in the list.

" } }, - "documentation":"

Represents the output of a ListDeployments operation.

" + "documentation":"

Represents the output of a ListDeployments operation.

" }, "ListGitHubAccountTokenNamesInput":{ "type":"structure", "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

An identifier returned from the previous ListGitHubAccountTokenNames call. It can be used to return the next set of names in the list.

" + "documentation":"

An identifier returned from the previous ListGitHubAccountTokenNames call. It can be used to return the next set of names in the list.

" } }, - "documentation":"

Represents the input of a ListGitHubAccountTokenNames operation.

" + "documentation":"

Represents the input of a ListGitHubAccountTokenNames operation.

" }, "ListGitHubAccountTokenNamesOutput":{ "type":"structure", @@ -3780,17 +3870,17 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent ListGitHubAccountTokenNames call to return the next set of names in the list.

" + "documentation":"

If a large amount of information is returned, an identifier is also returned. It can be used in a subsequent ListGitHubAccountTokenNames call to return the next set of names in the list.

" } }, - "documentation":"

Represents the output of a ListGitHubAccountTokenNames operation.

" + "documentation":"

Represents the output of a ListGitHubAccountTokenNames operation.

" }, "ListOnPremisesInstancesInput":{ "type":"structure", "members":{ "registrationStatus":{ "shape":"RegistrationStatus", - "documentation":"

The registration status of the on-premises instances:

  • Deregistered: Include deregistered on-premises instances in the resulting list.

  • Registered: Include registered on-premises instances in the resulting list.

" + "documentation":"

The registration status of the on-premises instances:

  • Deregistered: Include deregistered on-premises instances in the resulting list.

  • Registered: Include registered on-premises instances in the resulting list.

" }, "tagFilters":{ "shape":"TagFilterList", @@ -3801,7 +3891,7 @@ "documentation":"

An identifier returned from the previous list on-premises instances call. It can be used to return the next set of on-premises instances in the list.

" } }, - "documentation":"

Represents the input of a ListOnPremisesInstances operation.

" + "documentation":"

Represents the input of a ListOnPremisesInstances operation.

" }, "ListOnPremisesInstancesOutput":{ "type":"structure", @@ -3886,7 +3976,7 @@ }, "type":{ "shape":"MinimumHealthyHostsType", - "documentation":"

The minimum healthy instance type:

  • HOST_COUNT: The minimum number of healthy instance as an absolute value.

  • FLEET_PERCENT: The minimum number of healthy instance as a percentage of the total number of instance in the deployment.

In an example of nine instance, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment is successful if six or more instances are deployed to successfully. Otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instance at a time. The deployment is successful if four or more instance are deployed to successfully. Otherwise, the deployment fails.

In a call to the GetDeploymentConfig, CodeDeployDefault.OneAtATime returns a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment is still successful.

For more information, see AWS CodeDeploy Instance Health in the AWS CodeDeploy User Guide.

" + "documentation":"

The minimum healthy instance type:

  • HOST_COUNT: The minimum number of healthy instances as an absolute value.

  • FLEET_PERCENT: The minimum number of healthy instances as a percentage of the total number of instances in the deployment.

In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to three instances at a time. The deployment is successful if six or more instances are deployed to successfully. Otherwise, the deployment fails. If a FLEET_PERCENT of 40 is specified, deploy to up to five instances at a time. The deployment is successful if four or more instances are deployed to successfully. Otherwise, the deployment fails.

In a call to the GetDeploymentConfig, CodeDeployDefault.OneAtATime returns a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This means a deployment to only one instance at a time. (You cannot set the type to MOST_CONCURRENCY, only to HOST_COUNT or FLEET_PERCENT.) In addition, with CodeDeployDefault.OneAtATime, AWS CodeDeploy attempts to ensure that all instances but one are kept in a healthy state during the deployment. Although this allows one instance at a time to be taken offline for a new deployment, it also means that if the deployment to the last instance fails, the overall deployment is still successful.

For more information, see AWS CodeDeploy Instance Health in the AWS CodeDeploy User Guide.

" } }, "documentation":"

Information about minimum healthy instance.

" @@ -4038,7 +4128,7 @@ "documentation":"

The names of the on-premises instances from which to remove tags.

" } }, - "documentation":"

Represents the input of a RemoveTagsFromOnPremisesInstances operation.

" + "documentation":"

Represents the input of a RemoveTagsFromOnPremisesInstances operation.

" }, "Repository":{"type":"string"}, "ResourceArnRequiredException":{ @@ -4085,7 +4175,7 @@ "members":{ "revisionType":{ "shape":"RevisionLocationType", - "documentation":"

The type of application revision:

  • S3: An application revision stored in Amazon S3.

  • GitHub: An application revision stored in GitHub (EC2/On-premises deployments only).

  • String: A YAML-formatted or JSON-formatted string (AWS Lambda deployments only).

" + "documentation":"

The type of application revision:

  • S3: An application revision stored in Amazon S3.

  • GitHub: An application revision stored in GitHub (EC2/On-premises deployments only).

  • String: A YAML-formatted or JSON-formatted string (AWS Lambda deployments only).

  • AppSpecContent: An AppSpecContent object that contains the contents of an AppSpec file for an AWS Lambda or Amazon ECS deployment. The content is formatted as JSON or YAML stored as a RawString.

" }, "s3Location":{ "shape":"S3Location", @@ -4167,7 +4257,7 @@ }, "bundleType":{ "shape":"BundleType", - "documentation":"

The file type of the application revision. Must be one of the following:

  • tar: A tar archive file.

  • tgz: A compressed tar archive file.

  • zip: A zip archive file.

" + "documentation":"

The file type of the application revision. Must be one of the following:

  • tar: A tar archive file.

  • tgz: A compressed tar archive file.

  • zip: A zip archive file.

" }, "version":{ "shape":"VersionId", @@ -4210,7 +4300,7 @@ "documentation":"

Indicates, when a deployment is stopped, whether instances that have been updated should be rolled back to the previous version of the application revision.

" } }, - "documentation":"

Represents the input of a StopDeployment operation.

" + "documentation":"

Represents the input of a StopDeployment operation.

" }, "StopDeploymentOutput":{ "type":"structure", @@ -4224,7 +4314,7 @@ "documentation":"

An accompanying status message.

" } }, - "documentation":"

Represents the output of a StopDeployment operation.

" + "documentation":"

Represents the output of a StopDeployment operation.

" }, "StopStatus":{ "type":"string", @@ -4388,7 +4478,7 @@ "members":{ "tagFilters":{ "shape":"EC2TagFilterList", - "documentation":"

The tag filter key, type, and value used to identify Amazon EC2 instances in a replacement environment for a blue/green deployment. Cannot be used in the same call as ec2TagSet.

" + "documentation":"

The tag filter key, type, and value used to identify Amazon EC2 instances in a replacement environment for a blue/green deployment. Cannot be used in the same call as ec2TagSet.

" }, "autoScalingGroups":{ "shape":"AutoScalingGroupNameList", @@ -4396,7 +4486,7 @@ }, "ec2TagSet":{ "shape":"EC2TagSet", - "documentation":"

Information about the groups of EC2 instance tags that an instance must be identified by in order for it to be included in the replacement environment for a blue/green deployment. Cannot be used in the same call as tagFilters.

" + "documentation":"

Information about the groups of EC2 instance tags that an instance must be identified by in order for it to be included in the replacement environment for a blue/green deployment. Cannot be used in the same call as tagFilters.

" } }, "documentation":"

Information about the instances to be used in the replacement environment in a blue/green deployment.

" @@ -4440,7 +4530,7 @@ "documentation":"

The number of minutes between the first and second traffic shifts of a TimeBasedCanary deployment.

" } }, - "documentation":"

A configuration that shifts traffic from one version of a Lambda function to another in two increments. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + "documentation":"

A configuration that shifts traffic from one version of a Lambda function or ECS task set to another in two increments. The original and target Lambda function versions or ECS task sets are specified in the deployment's AppSpec file.

" }, "TimeBasedLinear":{ "type":"structure", @@ -4454,7 +4544,7 @@ "documentation":"

The number of minutes between each incremental traffic shift of a TimeBasedLinear deployment.

" } }, - "documentation":"

A configuration that shifts traffic from one version of a Lambda function to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + "documentation":"

A configuration that shifts traffic from one version of a Lambda function or ECS task set to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions or ECS task sets are specified in the deployment's AppSpec file.

" }, "TimeRange":{ "type":"structure", @@ -4476,7 +4566,7 @@ "members":{ "listenerArns":{ "shape":"ListenerArnList", - "documentation":"

The ARN of one listener. The listener identifies the route between a target group and a load balancer. This is an array of strings with a maximum size of one.

" + "documentation":"

The Amazon Resource Name (ARN) of one listener. The listener identifies the route between a target group and a load balancer. This is an array of strings with a maximum size of one.

" } }, "documentation":"

Information about a listener. The listener contains the path used to route traffic that is received from the load balancer to a target group.

" @@ -4486,18 +4576,18 @@ "members":{ "type":{ "shape":"TrafficRoutingType", - "documentation":"

The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by a deployment configuration .

" + "documentation":"

The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by a deployment configuration.

" }, "timeBasedCanary":{ "shape":"TimeBasedCanary", - "documentation":"

A configuration that shifts traffic from one version of a Lambda function to another in two increments. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + "documentation":"

A configuration that shifts traffic from one version of a Lambda function or ECS task set to another in two increments. The original and target Lambda function versions or ECS task sets are specified in the deployment's AppSpec file.

" }, "timeBasedLinear":{ "shape":"TimeBasedLinear", - "documentation":"

A configuration that shifts traffic from one version of a Lambda function to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions are specified in the deployment's AppSpec file.

" + "documentation":"

A configuration that shifts traffic from one version of a Lambda function or ECS task set to another in equal increments, with an equal number of minutes between each increment. The original and target Lambda function versions or ECS task sets are specified in the deployment's AppSpec file.

" } }, - "documentation":"

The configuration that specifies how traffic is shifted from one version of a Lambda function to another version during an AWS Lambda deployment.

" + "documentation":"

The configuration that specifies how traffic is shifted from one version of a Lambda function to another version during an AWS Lambda deployment, or from one Amazon ECS task set to another during an Amazon ECS deployment.

" }, "TrafficRoutingType":{ "type":"string", @@ -4517,7 +4607,7 @@ }, "triggerTargetArn":{ "shape":"TriggerTargetArn", - "documentation":"

The ARN of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.

" }, "triggerEvents":{ "shape":"TriggerEventTypeList", @@ -4574,7 +4664,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

The ARN that specifies from which resource to disassociate the tags with the keys in the TagKeys input paramter.

" + "documentation":"

The Amazon Resource Name (ARN) that specifies from which resource to disassociate the tags with the keys in the TagKeys input parameter.

" }, "TagKeys":{ "shape":"TagKeyList", @@ -4599,7 +4689,7 @@ "documentation":"

The new name to give the application.

" } }, - "documentation":"

Represents the input of an UpdateApplication operation.

" + "documentation":"

Represents the input of an UpdateApplication operation.

" }, "UpdateDeploymentGroupInput":{ "type":"structure", @@ -4642,7 +4732,7 @@ }, "triggerConfigurations":{ "shape":"TriggerConfigList", - "documentation":"

Information about triggers to change when the deployment group is updated. For examples, see Modify Triggers in an AWS CodeDeploy Deployment Group in the AWS CodeDeploy User Guide.

" + "documentation":"

Information about triggers to change when the deployment group is updated. For examples, see Edit a Trigger in a CodeDeploy Deployment Group in the AWS CodeDeploy User Guide.

" }, "alarmConfiguration":{ "shape":"AlarmConfiguration", @@ -4677,7 +4767,7 @@ "documentation":"

Information about an on-premises instance tag set. The deployment group includes only on-premises instances identified by all the tag groups.

" } }, - "documentation":"

Represents the input of an UpdateDeploymentGroup operation.

" + "documentation":"

Represents the input of an UpdateDeploymentGroup operation.

" }, "UpdateDeploymentGroupOutput":{ "type":"structure", @@ -4687,7 +4777,7 @@ "documentation":"

If the output contains no data, and the corresponding deployment group contained at least one Auto Scaling group, AWS CodeDeploy successfully removed all corresponding Auto Scaling lifecycle event hooks from the AWS account. If the output contains data, AWS CodeDeploy could not remove some Auto Scaling lifecycle event hooks from the AWS account.

" } }, - "documentation":"

Represents the output of an UpdateDeploymentGroup operation.

" + "documentation":"

Represents the output of an UpdateDeploymentGroup operation.

" }, "Value":{"type":"string"}, "Version":{"type":"string"}, diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 681feef3a023..decb18d70c42 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json b/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json index afbbca8aabb5..67d53f6bf119 100644 --- a/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/paginators-1.json @@ -1,10 +1,21 @@ { "pagination": { - "ListProfileTimes": { + "GetFindingsReportAccountSummary": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, + "ListFindingsReports": { "input_token": "nextToken", "output_token": "nextToken", "limit_key": "maxResults" }, + "ListProfileTimes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "profileTimes" + }, "ListProfilingGroups": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json b/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json index f9f10049de00..dc6f25873b18 100644 --- a/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,42 @@ "uid":"codeguruprofiler-2019-07-18" }, "operations":{ + "AddNotificationChannels":{ + "name":"AddNotificationChannels", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration", + "responseCode":200 + }, + 
"input":{"shape":"AddNotificationChannelsRequest"}, + "output":{"shape":"AddNotificationChannelsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Add up to 2 anomaly notification channels for a profiling group.

" + }, + "BatchGetFrameMetricData":{ + "name":"BatchGetFrameMetricData", + "http":{ + "method":"POST", + "requestUri":"/profilingGroups/{profilingGroupName}/frames/-/metrics", + "responseCode":200 + }, + "input":{"shape":"BatchGetFrameMetricDataRequest"}, + "output":{"shape":"BatchGetFrameMetricDataResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the time series of values for a requested list of frame metrics from a time period.

" + }, "ConfigureAgent":{ "name":"ConfigureAgent", "http":{ @@ -27,7 +63,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

" + "documentation":"

Used by profiler agents to report their current state and to receive remote configuration updates. For example, ConfigureAgent can be used to tell an agent whether to profile or not and for how long to return profiling data.

" }, "CreateProfilingGroup":{ "name":"CreateProfilingGroup", @@ -81,7 +117,56 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes a profiling group.

" + "documentation":"

Returns a ProfilingGroupDescription object that contains information about the requested profiling group.

" + }, + "GetFindingsReportAccountSummary":{ + "name":"GetFindingsReportAccountSummary", + "http":{ + "method":"GET", + "requestUri":"/internal/findingsReports", + "responseCode":200 + }, + "input":{"shape":"GetFindingsReportAccountSummaryRequest"}, + "output":{"shape":"GetFindingsReportAccountSummaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of FindingsReportSummary objects that contain analysis results for all profiling groups in your AWS account.

" + }, + "GetNotificationConfiguration":{ + "name":"GetNotificationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration", + "responseCode":200 + }, + "input":{"shape":"GetNotificationConfigurationRequest"}, + "output":{"shape":"GetNotificationConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get the current configuration for anomaly notifications for a profiling group.

" + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/profilingGroups/{profilingGroupName}/policy", + "responseCode":200 + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the JSON-formatted resource-based policy on a profiling group.

" }, "GetProfile":{ "name":"GetProfile", @@ -98,7 +183,41 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets the aggregated profile of a profiling group for the specified time range. If the requested time range does not align with the available aggregated profiles, it is expanded to attain alignment. If aggregated profiles are available only for part of the period requested, the profile is returned from the earliest available to the latest within the requested time range.

For example, if the requested time range is from 00:00 to 00:20 and the available profiles are from 00:15 to 00:25, the returned profile will be from 00:15 to 00:20.

You must specify exactly two of the following parameters: startTime, period, and endTime.

" + "documentation":"

Gets the aggregated profile of a profiling group for a specified time range. Amazon CodeGuru Profiler collects posted agent profiles for a profiling group into aggregated profiles.

 <note> <p> Because aggregated profiles expire over time <code>GetProfile</code> is not idempotent. </p> </note> <p> Specify the time range for the requested aggregated profile using 1 or 2 of the following parameters: <code>startTime</code>, <code>endTime</code>, <code>period</code>. The maximum time range allowed is 7 days. If you specify all 3 parameters, an exception is thrown. If you specify only <code>period</code>, the latest aggregated profile is returned. </p> <p> Aggregated profiles are available with aggregation periods of 5 minutes, 1 hour, and 1 day, aligned to UTC. The aggregation period of an aggregated profile determines how long it is retained. For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_AggregatedProfileTime.html"> <code>AggregatedProfileTime</code> </a>. The aggregated profile's aggregation period determines how long it is retained by CodeGuru Profiler. </p> <ul> <li> <p> If the aggregation period is 5 minutes, the aggregated profile is retained for 15 days. </p> </li> <li> <p> If the aggregation period is 1 hour, the aggregated profile is retained for 60 days. </p> </li> <li> <p> If the aggregation period is 1 day, the aggregated profile is retained for 3 years. </p> </li> </ul> <p>There are two use cases for calling <code>GetProfile</code>.</p> <ol> <li> <p> If you want to return an aggregated profile that already exists, use <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ListProfileTimes.html"> <code>ListProfileTimes</code> </a> to view the time ranges of existing aggregated profiles. Use them in a <code>GetProfile</code> request to return a specific, existing aggregated profile. </p> </li> <li> <p> If you want to return an aggregated profile for a time range that doesn't align with an existing aggregated profile, then CodeGuru Profiler makes a best effort to combine existing aggregated profiles from the requested time range and return them as one aggregated profile. 
</p> <p> If aggregated profiles do not exist for the full time range requested, then aggregated profiles for a smaller time range are returned. For example, if the requested time range is from 00:00 to 00:20, and the existing aggregated profiles are from 00:15 and 00:25, then the aggregated profiles from 00:15 to 00:20 are returned. </p> </li> </ol> 
" + }, + "GetRecommendations":{ + "name":"GetRecommendations", + "http":{ + "method":"GET", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/recommendations", + "responseCode":200 + }, + "input":{"shape":"GetRecommendationsRequest"}, + "output":{"shape":"GetRecommendationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of Recommendation objects that contain recommendations for a profiling group for a given time period. A list of Anomaly objects that contains details about anomalies detected in the profiling group for the same time period is also returned.

" + }, + "ListFindingsReports":{ + "name":"ListFindingsReports", + "http":{ + "method":"GET", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/findingsReports", + "responseCode":200 + }, + "input":{"shape":"ListFindingsReportsRequest"}, + "output":{"shape":"ListFindingsReportsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the available reports for a given profiling group and time range.

" }, "ListProfileTimes":{ "name":"ListProfileTimes", @@ -115,7 +234,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

List the start times of the available aggregated profiles of a profiling group for an aggregation period within the specified time range.

" + "documentation":"

Lists the start times of the available aggregated profiles of a profiling group for an aggregation period within the specified time range.

" }, "ListProfilingGroups":{ "name":"ListProfilingGroups", @@ -130,7 +249,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists profiling groups.

" + "documentation":"

Returns a list of profiling groups. The profiling groups are returned as ProfilingGroupDescription objects.

" }, "PostAgentProfile":{ "name":"PostAgentProfile", @@ -147,7 +266,79 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

" + "documentation":"

Submits profiling data to an aggregated profile of a profiling group. To get an aggregated profile that is created with this profiling data, use GetProfile .

" + }, + "PutPermission":{ + "name":"PutPermission", + "http":{ + "method":"PUT", + "requestUri":"/profilingGroups/{profilingGroupName}/policy/{actionGroup}", + "responseCode":200 + }, + "input":{"shape":"PutPermissionRequest"}, + "output":{"shape":"PutPermissionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds permissions to a profiling group's resource-based policy that are provided using an action group. If a profiling group doesn't have a resource-based policy, one is created for it using the permissions in the action group and the roles and users in the principals parameter.

 <p> The one supported action group that can be added is <code>agentPermissions</code> which grants <code>ConfigureAgent</code> and <code>PostAgentProfile</code> permissions. For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-ug/resource-based-policies.html">Resource-based policies in CodeGuru Profiler</a> in the <i>Amazon CodeGuru Profiler User Guide</i>, <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_ConfigureAgent.html"> <code>ConfigureAgent</code> </a>, and <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-api/API_PostAgentProfile.html"> <code>PostAgentProfile</code> </a>. </p> <p> The first time you call <code>PutPermission</code> on a profiling group, do not specify a <code>revisionId</code> because it doesn't have a resource-based policy. Subsequent calls must provide a <code>revisionId</code> to specify which revision of the resource-based policy to add the permissions to. </p> <p> The response contains the profiling group's JSON-formatted resource policy. </p> 
", + "idempotent":true + }, + "RemoveNotificationChannel":{ + "name":"RemoveNotificationChannel", + "http":{ + "method":"DELETE", + "requestUri":"/profilingGroups/{profilingGroupName}/notificationConfiguration/{channelId}", + "responseCode":200 + }, + "input":{"shape":"RemoveNotificationChannelRequest"}, + "output":{"shape":"RemoveNotificationChannelResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Remove one anomaly notification channel for a profiling group.

", + "idempotent":true + }, + "RemovePermission":{ + "name":"RemovePermission", + "http":{ + "method":"DELETE", + "requestUri":"/profilingGroups/{profilingGroupName}/policy/{actionGroup}", + "responseCode":200 + }, + "input":{"shape":"RemovePermissionRequest"}, + "output":{"shape":"RemovePermissionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes permissions from a profiling group's resource-based policy that are provided using an action group. The one supported action group that can be removed is agentPermission which grants ConfigureAgent and PostAgent permissions. For more information, see Resource-based policies in CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide, ConfigureAgent , and PostAgentProfile .

" + }, + "SubmitFeedback":{ + "name":"SubmitFeedback", + "http":{ + "method":"POST", + "requestUri":"/internal/profilingGroups/{profilingGroupName}/anomalies/{anomalyInstanceId}/feedback", + "responseCode":204 + }, + "input":{"shape":"SubmitFeedbackRequest"}, + "output":{"shape":"SubmitFeedbackResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Sends feedback to CodeGuru Profiler about whether the anomaly detected by the analysis is useful or not.

" }, "UpdateProfilingGroup":{ "name":"UpdateProfilingGroup", @@ -170,6 +361,40 @@ } }, "shapes":{ + "ActionGroup":{ + "type":"string", + "enum":["agentPermissions"] + }, + "AddNotificationChannelsRequest":{ + "type":"structure", + "required":[ + "channels", + "profilingGroupName" + ], + "members":{ + "channels":{ + "shape":"Channels", + "documentation":"

One or 2 channels to report to when anomalies are detected.

" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group that we are setting up notifications for.

", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

The structure representing the AddNotificationChannelsRequest.

" + }, + "AddNotificationChannelsResponse":{ + "type":"structure", + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

The new notification configuration for this profiling group.

" + } + }, + "documentation":"

The structure representing the AddNotificationChannelsResponse.

" + }, "AgentConfiguration":{ "type":"structure", "required":[ @@ -177,16 +402,20 @@ "shouldProfile" ], "members":{ + "agentParameters":{ + "shape":"AgentParameters", + "documentation":"

Parameters used by the profiler. The valid parameters are:

  • MaxStackDepth - The maximum depth of the stacks in the code that is represented in the profile. For example, if CodeGuru Profiler finds a method A, which calls method B, which calls method C, which calls method D, then the depth is 4. If the maxDepth is set to 2, then the profiler evaluates A and B.

  • MemoryUsageLimitPercent - The percentage of memory that is used by the profiler.

  • MinimumTimeForReportingInMilliseconds - The minimum time in milliseconds between sending reports.

  • ReportingIntervalInMilliseconds - The reporting interval in milliseconds used to report profiles.

  • SamplingIntervalInMilliseconds - The sampling interval in milliseconds that is used to profile samples.

" + }, "periodInSeconds":{ "shape":"Integer", - "documentation":"

" + "documentation":"

How long a profiling agent should send profiling data using ConfigureAgent . For example, if this is set to 300, the profiling agent calls ConfigureAgent every 5 minutes to submit the profiled data collected during that period.

" }, "shouldProfile":{ "shape":"Boolean", - "documentation":"

" + "documentation":"

A Boolean that specifies whether the profiling agent collects profiling data or not. Set to true to enable profiling.

" } }, - "documentation":"

" + "documentation":"

The response of ConfigureAgent that specifies if an agent profiles or not and for how long to return profiling data.

" }, "AgentOrchestrationConfig":{ "type":"structure", @@ -194,10 +423,25 @@ "members":{ "profilingEnabled":{ "shape":"Boolean", - "documentation":"

" + "documentation":"

A Boolean that specifies whether the profiling agent collects profiling data or not. Set to true to enable profiling.

" } }, - "documentation":"

" + "documentation":"

Specifies whether profiling is enabled or disabled for a profiling group. It is used by ConfigureAgent to enable or disable profiling for a profiling group.

" + }, + "AgentParameterField":{ + "type":"string", + "enum":[ + "MaxStackDepth", + "MemoryUsageLimitPercent", + "MinimumTimeForReportingInMilliseconds", + "ReportingIntervalInMilliseconds", + "SamplingIntervalInMilliseconds" + ] + }, + "AgentParameters":{ + "type":"map", + "key":{"shape":"AgentParameterField"}, + "value":{"shape":"String"} }, "AgentProfile":{"type":"blob"}, "AggregatedProfile":{"type":"blob"}, @@ -206,14 +450,14 @@ "members":{ "period":{ "shape":"AggregationPeriod", - "documentation":"

The time period.

" + "documentation":"

The aggregation period. This indicates the period during which an aggregation profile collects posted agent profiles for a profiling group. Use one of three valid durations that are specified using the ISO 8601 format.

  • P1D — 1 day

  • PT1H — 1 hour

  • PT5M — 5 minutes

" }, "start":{ "shape":"Timestamp", - "documentation":"

The start time.

" + "documentation":"

The time that aggregation of posted agent profiles for a profiling group starts. The aggregation profile contains profiles posted by the agent starting at this time for an aggregation period specified by the period property of the AggregatedProfileTime object.

Specify start using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" } }, - "documentation":"

Information about the time range of the latest available aggregated profile.

" + "documentation":"

Specifies the aggregation period and aggregation start time for an aggregated profile. An aggregated profile is used to collect posted agent profiles during an aggregation period. There are three possible aggregation periods (1 day, 1 hour, or 5 minutes).

" }, "AggregationPeriod":{ "type":"string", @@ -223,27 +467,214 @@ "PT5M" ] }, + "Anomalies":{ + "type":"list", + "member":{"shape":"Anomaly"} + }, + "Anomaly":{ + "type":"structure", + "required":[ + "instances", + "metric", + "reason" + ], + "members":{ + "instances":{ + "shape":"AnomalyInstances", + "documentation":"

A list of the instances of the detected anomalies during the requested period.

" + }, + "metric":{ + "shape":"Metric", + "documentation":"

Details about the metric that the analysis used when it detected the anomaly. The metric includes the name of the frame that was analyzed with the type and thread states used to derive the metric value for that frame.

" + }, + "reason":{ + "shape":"String", + "documentation":"

The reason for which metric was flagged as anomalous.

" + } + }, + "documentation":"

Details about an anomaly in a specific metric of application profile. The anomaly is detected using analysis of the metric data over a period of time.

" + }, + "AnomalyInstance":{ + "type":"structure", + "required":[ + "id", + "startTime" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the period during which the metric is flagged as anomalous. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "id":{ + "shape":"String", + "documentation":"

The universally unique identifier (UUID) of an instance of an anomaly in a metric.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the period during which the metric is flagged as anomalous. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "userFeedback":{ + "shape":"UserFeedback", + "documentation":"

Feedback type on a specific instance of anomaly submitted by the user.

" + } + }, + "documentation":"

The specific duration in which the metric is flagged as anomalous.

" + }, + "AnomalyInstanceId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "AnomalyInstances":{ + "type":"list", + "member":{"shape":"AnomalyInstance"} + }, + "BatchGetFrameMetricDataRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", + "location":"querystring", + "locationName":"endTime" + }, + "frameMetrics":{ + "shape":"FrameMetrics", + "documentation":"

The details of the metrics that are used to request a time series of values. The metric includes the name of the frame, the aggregation type to calculate the metric value for the frame, and the thread states to use to get the count for the metric value of the frame.

" + }, + "period":{ + "shape":"Period", + "documentation":"

The duration of the frame metrics used to return the time series values. Specify using the ISO 8601 format. The maximum period duration is one day (PT24H or P1D).

", + "location":"querystring", + "locationName":"period" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group associated with the frame metrics used to return the time series values.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the time period for the frame metrics used to return the time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", + "location":"querystring", + "locationName":"startTime" + }, + "targetResolution":{ + "shape":"AggregationPeriod", + "documentation":"

The requested resolution of time steps for the returned time series of values. If the requested target resolution is not available due to data not being retained we provide a best effort result by falling back to the most granular available resolution after the target resolution. There are 3 valid values.

  • P1D — 1 day

  • PT1H — 1 hour

  • PT5M — 5 minutes

", + "location":"querystring", + "locationName":"targetResolution" + } + }, + "documentation":"

The structure representing the BatchGetFrameMetricDataRequest.

" + }, + "BatchGetFrameMetricDataResponse":{ + "type":"structure", + "required":[ + "endTime", + "endTimes", + "frameMetricData", + "resolution", + "startTime", + "unprocessedEndTimes" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "endTimes":{ + "shape":"ListOfTimestamps", + "documentation":"

List of instances, or time steps, in the time series. For example, if the period is one day (PT24H), and the resolution is five minutes (PT5M), then there are 288 endTimes in the list that are each five minutes apart.

" + }, + "frameMetricData":{ + "shape":"FrameMetricData", + "documentation":"

Details of the metrics to request a time series of values. The metric includes the name of the frame, the aggregation type to calculate the metric value for the frame, and the thread states to use to get the count for the metric value of the frame.

" + }, + "resolution":{ + "shape":"AggregationPeriod", + "documentation":"

Resolution or granularity of the profile data used to generate the time series. This is the value used to jump through time steps in a time series. There are 3 valid values.

  • P1D — 1 day

  • PT1H — 1 hour

  • PT5M — 5 minutes

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the time period for the returned time series values. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "unprocessedEndTimes":{ + "shape":"UnprocessedEndTimeMap", + "documentation":"

List of instances which remained unprocessed. This will create a missing time step in the list of end times.

" + } + }, + "documentation":"

The structure representing the BatchGetFrameMetricDataResponse.

" + }, "Boolean":{ "type":"boolean", "box":true }, + "Channel":{ + "type":"structure", + "required":[ + "eventPublishers", + "uri" + ], + "members":{ + "eventPublishers":{ + "shape":"EventPublishers", + "documentation":"

List of publishers for different type of events that may be detected in an application from the profile. Anomaly detection is the only event publisher in Profiler.

" + }, + "id":{ + "shape":"ChannelId", + "documentation":"

Unique identifier for each Channel in the notification configuration of a Profiling Group. A random UUID for channelId is used when adding a channel to the notification configuration if not specified in the request.

" + }, + "uri":{ + "shape":"ChannelUri", + "documentation":"

Unique arn of the resource to be used for notifications. We support a valid SNS topic arn as a channel uri.

" + } + }, + "documentation":"

Notification medium for users to get alerted for events that occur in application profile. We support SNS topic as a notification channel.

" + }, + "ChannelId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "ChannelUri":{ + "type":"string", + "documentation":"

Channel URI uniquely identifies a Notification Channel. TopicArn is the uri for an SNS channel, emailId is uri for an email channel etc. Currently we only support SNS channels and thus required to be an ARN

" + }, + "Channels":{ + "type":"list", + "member":{"shape":"Channel"}, + "max":2, + "min":1 + }, "ClientToken":{ "type":"string", "max":64, "min":1, "pattern":"^[\\w-]+$" }, + "ComputePlatform":{ + "type":"string", + "enum":[ + "AWSLambda", + "Default" + ] + }, "ConfigureAgentRequest":{ "type":"structure", "required":["profilingGroupName"], "members":{ "fleetInstanceId":{ "shape":"FleetInstanceId", - "documentation":"

" + "documentation":"

A universally unique identifier (UUID) for a profiling instance. For example, if the profiling instance is an Amazon EC2 instance, it is the instance ID. If it is an AWS Fargate container, it is the container's task ID.

" + }, + "metadata":{ + "shape":"Metadata", + "documentation":"

Metadata captured about the compute platform the agent is running on. It includes information about sampling and reporting. The valid fields are:

  • COMPUTE_PLATFORM - The compute platform on which the agent is running

  • AGENT_ID - The ID for an agent instance.

  • AWS_REQUEST_ID - The AWS request ID of a Lambda invocation.

  • EXECUTION_ENVIRONMENT - The execution environment a Lambda function is running on.

  • LAMBDA_FUNCTION_ARN - The Amazon Resource Name (ARN) that is used to invoke a Lambda function.

  • LAMBDA_MEMORY_LIMIT_IN_MB - The memory allocated to a Lambda function.

  • LAMBDA_REMAINING_TIME_IN_MILLISECONDS - The time in milliseconds before execution of a Lambda function times out.

  • LAMBDA_TIME_GAP_BETWEEN_INVOKES_IN_MILLISECONDS - The time in milliseconds between two invocations of a Lambda function.

  • LAMBDA_PREVIOUS_EXECUTION_TIME_IN_MILLISECONDS - The time in milliseconds for the previous Lambda invocation.

" }, "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

", + "documentation":"

The name of the profiling group for which the configured agent is collecting profiling data.

", "location":"uri", "locationName":"profilingGroupName" } @@ -256,7 +687,7 @@ "members":{ "configuration":{ "shape":"AgentConfiguration", - "documentation":"

" + "documentation":"

An AgentConfiguration object that specifies if an agent profiles or not and for how long to return profiling data.

" } }, "documentation":"

The structure representing the configureAgentResponse.

", @@ -284,18 +715,22 @@ "members":{ "agentOrchestrationConfig":{ "shape":"AgentOrchestrationConfig", - "documentation":"

The agent orchestration configuration.

" + "documentation":"

Specifies whether profiling is enabled or disabled for the created profiling group.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

This parameter specifies a unique identifier for the new profiling group that helps ensure idempotency.

", + "documentation":"

Amazon CodeGuru Profiler uses this universally unique identifier (UUID) to prevent the accidental creation of duplicate profiling groups if there are failures and retries.

", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" }, + "computePlatform":{ + "shape":"ComputePlatform", + "documentation":"

The compute platform of the profiling group. Use AWSLambda if your application runs on AWS Lambda. Use Default if your application runs on a compute platform that is not AWS Lambda, such an Amazon EC2 instance, an on-premises server, or a different platform. If not specified, Default is used.

" + }, "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

The name of the profiling group.

" + "documentation":"

The name of the profiling group to create.

" } }, "documentation":"

The structure representing the createProfiliingGroupRequest.

" @@ -306,7 +741,7 @@ "members":{ "profilingGroup":{ "shape":"ProfilingGroupDescription", - "documentation":"

Information about the new profiling group

" + "documentation":"

The returned ProfilingGroupDescription object that contains information about the created profiling group.

" } }, "documentation":"

The structure representing the createProfilingGroupResponse.

", @@ -318,7 +753,7 @@ "members":{ "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

The profiling group name to delete.

", + "documentation":"

The name of the profiling group to delete.

", "location":"uri", "locationName":"profilingGroupName" } @@ -337,7 +772,7 @@ "members":{ "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

The profiling group name.

", + "documentation":"

The name of the profiling group to get information about.

", "location":"uri", "locationName":"profilingGroupName" } @@ -350,102 +785,435 @@ "members":{ "profilingGroup":{ "shape":"ProfilingGroupDescription", - "documentation":"

Information about a profiling group.

" + "documentation":"

The returned ProfilingGroupDescription object that contains information about the requested profiling group.

" } }, "documentation":"

The structure representing the describeProfilingGroupResponse.

", "payload":"profilingGroup" }, + "Double":{ + "type":"double", + "box":true + }, + "EventPublisher":{ + "type":"string", + "enum":["AnomalyDetection"] + }, + "EventPublishers":{ + "type":"list", + "member":{"shape":"EventPublisher"}, + "max":1, + "min":1 + }, + "FeedbackType":{ + "type":"string", + "enum":[ + "Negative", + "Positive" + ] + }, + "FindingsReportId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "FindingsReportSummaries":{ + "type":"list", + "member":{"shape":"FindingsReportSummary"} + }, + "FindingsReportSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"FindingsReportId", + "documentation":"

The universally unique identifier (UUID) of the recommendation report.

" + }, + "profileEndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "profileStartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "profilingGroupName":{ + "shape":"String", + "documentation":"

The name of the profiling group that is associated with the analysis data.

" + }, + "totalNumberOfFindings":{ + "shape":"Integer", + "documentation":"

The total number of different recommendations that were found by the analysis.

" + } + }, + "documentation":"

Information about potential recommendations that might be created from the analysis of profiling data.

" + }, "FleetInstanceId":{ "type":"string", "max":255, "min":1, "pattern":"^[\\w-.:/]+$" }, - "GetProfileRequest":{ + "FrameMetric":{ + "type":"structure", + "required":[ + "frameName", + "threadStates", + "type" + ], + "members":{ + "frameName":{ + "shape":"String", + "documentation":"

Name of the method common across the multiple occurrences of a frame in an application profile.

" + }, + "threadStates":{ + "shape":"ThreadStates", + "documentation":"

List of application runtime thread states used to get the counts for a frame to derive a metric value.

" + }, + "type":{ + "shape":"MetricType", + "documentation":"

A type of aggregation that specifies how a metric for a frame is analyzed. The supported value AggregatedRelativeTotalTime is an aggregation of the metric value for one frame that is calculated across the occurrences of all frames in a profile.

" + } + }, + "documentation":"

The frame name, metric type, and thread states. These are used to derive the value of the metric for the frame.

" + }, + "FrameMetricData":{ + "type":"list", + "member":{"shape":"FrameMetricDatum"} + }, + "FrameMetricDatum":{ + "type":"structure", + "required":[ + "frameMetric", + "values" + ], + "members":{ + "frameMetric":{"shape":"FrameMetric"}, + "values":{ + "shape":"FrameMetricValues", + "documentation":"

A list of values that are associated with a frame metric.

" + } + }, + "documentation":"

Information about a frame metric and its values.

" + }, + "FrameMetricValues":{ + "type":"list", + "member":{"shape":"Double"} + }, + "FrameMetrics":{ + "type":"list", + "member":{"shape":"FrameMetric"} + }, + "GetFindingsReportAccountSummaryRequest":{ + "type":"structure", + "members":{ + "dailyReportsOnly":{ + "shape":"Boolean", + "documentation":"

A Boolean value indicating whether to only return reports from daily profiles. If set to True, only analysis data from daily profiles is returned. If set to False, analysis data is returned from smaller time windows (for example, one hour).

", + "location":"querystring", + "locationName":"dailyReportsOnly" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results returned by GetFindingsReportAccountSummary in paginated output. When this parameter is used, GetFindingsReportAccountSummary only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another GetFindingsReportAccountSummary request with the returned nextToken value.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value returned from a previous paginated GetFindingsReportAccountSummary request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "location":"querystring", + "locationName":"nextToken" + } + }, + "documentation":"

The structure representing the GetFindingsReportAccountSummaryRequest.

" + }, + "GetFindingsReportAccountSummaryResponse":{ + "type":"structure", + "required":["reportSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value to include in a future GetFindingsReportAccountSummary request. When the results of a GetFindingsReportAccountSummary request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" + }, + "reportSummaries":{ + "shape":"FindingsReportSummaries", + "documentation":"

The returned list of FindingsReportSummary objects that contain summaries of analysis results for all profiling groups in your AWS account.

" + } + }, + "documentation":"

The structure representing the GetFindingsReportAccountSummaryResponse.

" + }, + "GetNotificationConfigurationRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group we want to get the notification configuration for.

", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

The structure representing the GetNotificationConfigurationRequest.

" + }, + "GetNotificationConfigurationResponse":{ + "type":"structure", + "required":["notificationConfiguration"], + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

The current notification configuration for this profiling group.

" + } + }, + "documentation":"

The structure representing the GetNotificationConfigurationResponse.

" + }, + "GetPolicyRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group.

", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

The structure representing the getPolicyRequest.

" + }, + "GetPolicyResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

The JSON-formatted resource-based policy attached to the ProfilingGroup.

" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

A unique identifier for the current revision of the returned policy.

" + } + }, + "documentation":"

The structure representing the getPolicyResponse.

" + }, + "GetProfileRequest":{ + "type":"structure", + "required":["profilingGroupName"], + "members":{ + "accept":{ + "shape":"String", + "documentation":"

The format of the returned profiling data. The format maps to the Accept and Content-Type headers of the HTTP request. You can specify one of the following formats, or use the default format.

 <ul> <li> <p> <code>application/json</code> — standard JSON format </p> </li> <li> <p> <code>application/x-amzn-ion</code> — the Amazon Ion data format. For more information, see <a href="http://amzn.github.io/ion-docs/">Amazon Ion</a>. </p> </li> </ul> 
", + "location":"header", + "locationName":"Accept" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the requested profile. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

If you specify endTime, then you must also specify period or startTime, but not both.

", + "location":"querystring", + "locationName":"endTime" + }, + "maxDepth":{ + "shape":"MaxDepth", + "documentation":"

The maximum depth of the stacks in the code that is represented in the aggregated profile. For example, if CodeGuru Profiler finds a method A, which calls method B, which calls method C, which calls method D, then the depth is 4. If the maxDepth is set to 2, then the aggregated profile contains representations of methods A and B.

", + "location":"querystring", + "locationName":"maxDepth" + }, + "period":{ + "shape":"Period", + "documentation":"

Used with startTime or endTime to specify the time range for the returned aggregated profile. Specify using the ISO 8601 format. For example, P1DT1H1M1S.

 <p> To get the latest aggregated profile, specify only <code>period</code>. </p> 
", + "location":"querystring", + "locationName":"period" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group to get.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the profile to get. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

 <p> If you specify <code>startTime</code>, then you must also specify <code>period</code> or <code>endTime</code>, but not both. </p> 
", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

The structure representing the getProfileRequest.

" + }, + "GetProfileResponse":{ + "type":"structure", + "required":[ + "contentType", + "profile" + ], + "members":{ + "contentEncoding":{ + "shape":"String", + "documentation":"

The content encoding of the profile.

", + "location":"header", + "locationName":"Content-Encoding" + }, + "contentType":{ + "shape":"String", + "documentation":"

The content type of the profile in the payload. It is either application/json or the default application/x-amzn-ion.

", + "location":"header", + "locationName":"Content-Type" + }, + "profile":{ + "shape":"AggregatedProfile", + "documentation":"

Information about the profile.

" + } + }, + "documentation":"

The structure representing the getProfileResponse.

", + "payload":"profile" + }, + "GetRecommendationsRequest":{ + "type":"structure", + "required":[ + "endTime", + "profilingGroupName", + "startTime" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", + "location":"querystring", + "locationName":"endTime" + }, + "locale":{ + "shape":"Locale", + "documentation":"

The language used to provide analysis. Specify using a string that is one of the following BCP 47 language codes.

  • de-DE - German, Germany

  • en-GB - English, United Kingdom

  • en-US - English, United States

  • es-ES - Spanish, Spain

  • fr-FR - French, France

  • it-IT - Italian, Italy

  • ja-JP - Japanese, Japan

  • ko-KR - Korean, Republic of Korea

  • pt-BR - Portuguese, Brazil

  • zh-CN - Chinese, China

  • zh-TW - Chinese, Taiwan

", + "location":"querystring", + "locationName":"locale" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group to get analysis data about.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", + "location":"querystring", + "locationName":"startTime" + } + }, + "documentation":"

The structure representing the GetRecommendationsRequest.

" + }, + "GetRecommendationsResponse":{ + "type":"structure", + "required":[ + "anomalies", + "profileEndTime", + "profileStartTime", + "profilingGroupName", + "recommendations" + ], + "members":{ + "anomalies":{ + "shape":"Anomalies", + "documentation":"

The list of anomalies that the analysis has found for this profile.

" + }, + "profileEndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "profileStartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the profile the analysis data is about. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group the analysis data is about.

" + }, + "recommendations":{ + "shape":"Recommendations", + "documentation":"

The list of recommendations that the analysis found for this profile.

" + } + }, + "documentation":"

The structure representing the GetRecommendationsResponse.

" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The server encountered an internal error and is unable to complete the request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListFindingsReportsRequest":{ "type":"structure", - "required":["profilingGroupName"], + "required":[ + "endTime", + "profilingGroupName", + "startTime" + ], "members":{ - "accept":{ - "shape":"String", - "documentation":"

The format of the profile to return. You can choose application/json or the default application/x-amzn-ion.

", - "location":"header", - "locationName":"Accept" + "dailyReportsOnly":{ + "shape":"Boolean", + "documentation":"

A Boolean value indicating whether to only return reports from daily profiles. If set to True, only analysis data from daily profiles is returned. If set to False, analysis data is returned from smaller time windows (for example, one hour).

", + "location":"querystring", + "locationName":"dailyReportsOnly" }, "endTime":{ "shape":"Timestamp", - "documentation":"

You must specify exactly two of the following parameters: startTime, period, and endTime.

", + "documentation":"

The end time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", "location":"querystring", "locationName":"endTime" }, - "maxDepth":{ - "shape":"MaxDepth", - "documentation":"

The maximum depth of the graph.

", + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of report results returned by ListFindingsReports in paginated output. When this parameter is used, ListFindingsReports only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListFindingsReports request with the returned nextToken value.

", "location":"querystring", - "locationName":"maxDepth" + "locationName":"maxResults" }, - "period":{ - "shape":"Period", - "documentation":"

The period of the profile to get. The time range must be in the past and not longer than one week.

You must specify exactly two of the following parameters: startTime, period, and endTime.

", + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value returned from a previous paginated ListFindingsReportsRequest request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "location":"querystring", - "locationName":"period" + "locationName":"nextToken" }, "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

The name of the profiling group to get.

", + "documentation":"

The name of the profiling group from which to search for analysis data.

", "location":"uri", "locationName":"profilingGroupName" }, "startTime":{ "shape":"Timestamp", - "documentation":"

The start time of the profile to get.

You must specify exactly two of the following parameters: startTime, period, and endTime.

", + "documentation":"

The start time of the profile to get analysis data about. You must specify startTime and endTime. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

", "location":"querystring", "locationName":"startTime" } }, - "documentation":"

The structure representing the getProfileRequest.

" + "documentation":"

The structure representing the ListFindingsReportsRequest.

" }, - "GetProfileResponse":{ + "ListFindingsReportsResponse":{ "type":"structure", - "required":[ - "contentType", - "profile" - ], + "required":["findingsReportSummaries"], "members":{ - "contentEncoding":{ - "shape":"String", - "documentation":"

The content encoding of the profile.

", - "location":"header", - "locationName":"Content-Encoding" - }, - "contentType":{ - "shape":"String", - "documentation":"

The content type of the profile in the payload. It is either application/json or the default application/x-amzn-ion.

", - "location":"header", - "locationName":"Content-Type" + "findingsReportSummaries":{ + "shape":"FindingsReportSummaries", + "documentation":"

The list of analysis results summaries.

" }, - "profile":{ - "shape":"AggregatedProfile", - "documentation":"

Information about the profile.

" + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The nextToken value to include in a future ListFindingsReports request. When the results of a ListFindingsReports request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

" } }, - "documentation":"

The structure representing the getProfileResponse.

", - "payload":"profile" - }, - "Integer":{ - "type":"integer", - "box":true + "documentation":"

The structure representing the ListFindingsReportsResponse.

" }, - "InternalServerException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

The server encountered an internal error and is unable to complete the request.

", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true + "ListOfTimestamps":{ + "type":"list", + "member":{"shape":"TimestampStructure"} }, "ListProfileTimesRequest":{ "type":"structure", @@ -482,7 +1250,7 @@ }, "period":{ "shape":"AggregationPeriod", - "documentation":"

The aggregation period.

", + "documentation":"

The aggregation period. This specifies the period during which an aggregation profile collects posted agent profiles for a profiling group. There are 3 valid values.

  • P1D — 1 day

  • PT1H — 1 hour

  • PT5M — 5 minutes

", "location":"querystring", "locationName":"period" }, @@ -521,7 +1289,7 @@ "members":{ "includeDescription":{ "shape":"Boolean", - "documentation":"

A Boolean value indicating whether to include a description.

", + "documentation":"

A Boolean value indicating whether to include a description. If true, then a list of ProfilingGroupDescription objects that contain detailed information about profiling groups is returned. If false, then a list of profiling group names is returned.

", "location":"querystring", "locationName":"includeDescription" }, @@ -550,15 +1318,41 @@ }, "profilingGroupNames":{ "shape":"ProfilingGroupNames", - "documentation":"

Information about profiling group names.

" + "documentation":"

A returned list of profiling group names. A list of the names is returned only if includeDescription is false, otherwise a list of ProfilingGroupDescription objects is returned.

" }, "profilingGroups":{ "shape":"ProfilingGroupDescriptions", - "documentation":"

Information about profiling groups.

" + "documentation":"

A returned list of ProfilingGroupDescription objects. A list of ProfilingGroupDescription objects is returned only if includeDescription is true, otherwise a list of profiling group names is returned.

" } }, "documentation":"

The structure representing the listProfilingGroupsResponse.

" }, + "Locale":{ + "type":"string", + "documentation":"

BCP47 language code. Supported locales: de-DE, en-GB, en-US, es-ES, fr-FR, it-IT, ja-JP, ko-KR, pt-BR, zh-CN, zh-TW

" + }, + "Match":{ + "type":"structure", + "members":{ + "frameAddress":{ + "shape":"String", + "documentation":"

The location in the profiling graph that contains a recommendation found during analysis.

" + }, + "targetFramesIndex":{ + "shape":"Integer", + "documentation":"

The target frame that triggered a match.

" + }, + "thresholdBreachValue":{ + "shape":"Double", + "documentation":"

The value in the profile data that exceeded the recommendation threshold.

" + } + }, + "documentation":"

The part of a profile that contains a recommendation found during analysis.

" + }, + "Matches":{ + "type":"list", + "member":{"shape":"Match"} + }, "MaxDepth":{ "type":"integer", "box":true, @@ -571,6 +1365,62 @@ "max":1000, "min":1 }, + "Metadata":{ + "type":"map", + "key":{"shape":"MetadataField"}, + "value":{"shape":"String"} + }, + "MetadataField":{ + "type":"string", + "enum":[ + "AgentId", + "AwsRequestId", + "ComputePlatform", + "ExecutionEnvironment", + "LambdaFunctionArn", + "LambdaMemoryLimitInMB", + "LambdaPreviousExecutionTimeInMilliseconds", + "LambdaRemainingTimeInMilliseconds", + "LambdaTimeGapBetweenInvokesInMilliseconds" + ] + }, + "Metric":{ + "type":"structure", + "required":[ + "frameName", + "threadStates", + "type" + ], + "members":{ + "frameName":{ + "shape":"String", + "documentation":"

The name of the method that appears as a frame in any stack in a profile.

" + }, + "threadStates":{ + "shape":"Strings", + "documentation":"

The list of application runtime thread states that is used to calculate the metric value for the frame.

" + }, + "type":{ + "shape":"MetricType", + "documentation":"

A type that specifies how a metric for a frame is analyzed. The supported value AggregatedRelativeTotalTime is an aggregation of the metric value for one frame that is calculated across the occurrences of all frames in a profile.

" + } + }, + "documentation":"

Details about the metric that the analysis used when it detected the anomaly. This is the metric that is analyzed to create recommendations. It includes the name of the frame that was analyzed and the type and thread states used to derive the metric value for that frame.

" + }, + "MetricType":{ + "type":"string", + "enum":["AggregatedRelativeTotalTime"] + }, + "NotificationConfiguration":{ + "type":"structure", + "members":{ + "channels":{ + "shape":"Channels", + "documentation":"

List of up to two channels to be used for sending notifications for events detected from the application profile.

" + } + }, + "documentation":"

The configuration for notifications stored for each profiling group. This includes up to two channels and a list of event publishers associated with each channel.

" + }, "OrderBy":{ "type":"string", "enum":[ @@ -584,6 +1434,45 @@ "min":1, "pattern":"^[\\w-]+$" }, + "Pattern":{ + "type":"structure", + "members":{ + "countersToAggregate":{ + "shape":"Strings", + "documentation":"

A list of the different counters used to determine if there is a match.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the recommendation. This explains a potential inefficiency in a profiled application.

" + }, + "id":{ + "shape":"String", + "documentation":"

The universally unique identifier (UUID) of this pattern.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name for this pattern.

" + }, + "resolutionSteps":{ + "shape":"String", + "documentation":"

A string that contains the steps recommended to address the potential inefficiency.

" + }, + "targetFrames":{ + "shape":"TargetFrames", + "documentation":"

A list of frame names that were searched during the analysis that generated a recommendation.

" + }, + "thresholdPercent":{ + "shape":"Percentage", + "documentation":"

The percentage of time an application spends in one method that triggers a recommendation. The percentage of time is the same as the percentage of the total gathered sample counts during analysis.

" + } + }, + "documentation":"

A set of rules used to make a recommendation during an analysis.

" + }, + "Percentage":{ + "type":"double", + "max":100, + "min":0 + }, "Period":{ "type":"string", "max":64, @@ -599,24 +1488,24 @@ "members":{ "agentProfile":{ "shape":"AgentProfile", - "documentation":"

" + "documentation":"

The submitted profiling data.

" }, "contentType":{ "shape":"String", - "documentation":"

", + "documentation":"

The format of the submitted profiling data. The format maps to the Accept and Content-Type headers of the HTTP request. You can specify one of the following: application/json or the default application/x-amzn-ion.

 <ul> <li> <p> <code>application/json</code> — standard JSON format </p> </li> <li> <p> <code>application/x-amzn-ion</code> — the Amazon Ion data format. For more information, see <a href="http://amzn.github.io/ion-docs/">Amazon Ion</a>. </p> </li> </ul> 
", "location":"header", "locationName":"Content-Type" }, "profileToken":{ "shape":"ClientToken", - "documentation":"

", + "documentation":"

Amazon CodeGuru Profiler uses this universally unique identifier (UUID) to prevent the accidental submission of duplicate profiling data if there are failures and retries.

", "idempotencyToken":true, "location":"querystring", "locationName":"profileToken" }, "profilingGroupName":{ "shape":"ProfilingGroupName", - "documentation":"

", + "documentation":"

The name of the profiling group with the aggregated profile that receives the submitted profiling data.

", "location":"uri", "locationName":"profilingGroupName" } @@ -630,15 +1519,22 @@ }, "documentation":"

The structure representing the postAgentProfileResponse.

" }, + "Principal":{"type":"string"}, + "Principals":{ + "type":"list", + "member":{"shape":"Principal"}, + "max":50, + "min":1 + }, "ProfileTime":{ "type":"structure", "members":{ "start":{ "shape":"Timestamp", - "documentation":"

The start time of the profile.

" + "documentation":"

The start time of a profile. It is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" } }, - "documentation":"

Information about the profile time.

" + "documentation":"

Contains the start time of a profile.

" }, "ProfileTimes":{ "type":"list", @@ -650,15 +1546,19 @@ "members":{ "agentOrchestrationConfig":{ "shape":"AgentOrchestrationConfig", - "documentation":"

" + "documentation":"

An AgentOrchestrationConfig object that indicates if the profiling group is enabled for profiling or not.

" }, "arn":{ "shape":"ProfilingGroupArn", - "documentation":"

The Amazon Resource Name (ARN) identifying the profiling group.

" + "documentation":"

The Amazon Resource Name (ARN) identifying the profiling group resource.

" + }, + "computePlatform":{ + "shape":"ComputePlatform", + "documentation":"

The compute platform of the profiling group. If it is set to AWSLambda, then the profiled application runs on AWS Lambda. If it is set to Default, then the profiled application runs on a compute platform that is not AWS Lambda, such as an Amazon EC2 instance, an on-premises server, or a different platform. The default is Default.

" }, "createdAt":{ "shape":"Timestamp", - "documentation":"

The time, in milliseconds since the epoch, when the profiling group was created.

" + "documentation":"

The time when the profiling group was created. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" }, "name":{ "shape":"ProfilingGroupName", @@ -666,14 +1566,14 @@ }, "profilingStatus":{ "shape":"ProfilingStatus", - "documentation":"

The status of the profiling group.

" + "documentation":"

A ProfilingStatus object that includes information about the last time a profile agent pinged back, the last time a profile was received, and the aggregation period and start time for the most recent aggregated profile.

" }, "updatedAt":{ "shape":"Timestamp", - "documentation":"

The time, in milliseconds since the epoch, when the profiling group was last updated.

" + "documentation":"

The date and time when the profiling group was last updated. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" } }, - "documentation":"

The description of a profiling group.

" + "documentation":"

Contains information about a profiling group.

" }, "ProfilingGroupDescriptions":{ "type":"list", @@ -694,18 +1594,188 @@ "members":{ "latestAgentOrchestratedAt":{ "shape":"Timestamp", - "documentation":"

The time, in milliseconds since the epoch, when the latest agent was orchestrated.

" + "documentation":"

The date and time when the profiling agent most recently pinged back. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" }, "latestAgentProfileReportedAt":{ "shape":"Timestamp", - "documentation":"

The time, in milliseconds since the epoch, when the latest agent was reported..

" + "documentation":"

The date and time when the most recent profile was received. Specify using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" }, "latestAggregatedProfile":{ "shape":"AggregatedProfileTime", - "documentation":"

The latest aggregated profile

" + "documentation":"

An AggregatedProfileTime object that contains the aggregation period and start time for an aggregated profile.

" + } + }, + "documentation":"

Profiling status includes information about the last time a profile agent pinged back, the last time a profile was received, and the aggregation period and start time for the most recent aggregated profile.

" + }, + "PutPermissionRequest":{ + "type":"structure", + "required":[ + "actionGroup", + "principals", + "profilingGroupName" + ], + "members":{ + "actionGroup":{ + "shape":"ActionGroup", + "documentation":"

Specifies an action group that contains permissions to add to a profiling group resource. One action group is supported, agentPermissions, which grants permission to perform actions required by the profiling agent, ConfigureAgent and PostAgentProfile permissions.

", + "location":"uri", + "locationName":"actionGroup" + }, + "principals":{ + "shape":"Principals", + "documentation":"

A list of ARNs for the roles and users you want to grant access to the profiling group. Wildcards are not supported in the ARNs.

" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group to grant access to.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

A universally unique identifier (UUID) for the revision of the policy you are adding to the profiling group. Do not specify this when you add permissions to a profiling group for the first time. If a policy already exists on the profiling group, you must specify the revisionId.

" + } + }, + "documentation":"

The structure representing the putPermissionRequest.

" + }, + "PutPermissionResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

The JSON-formatted resource-based policy on the profiling group that includes the added permissions.

" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

A universally unique identifier (UUID) for the revision of the resource-based policy that includes the added permissions. The JSON-formatted policy is in the policy element of the response.

" + } + }, + "documentation":"

The structure representing the putPermissionResponse.

" + }, + "Recommendation":{ + "type":"structure", + "required":[ + "allMatchesCount", + "allMatchesSum", + "endTime", + "pattern", + "startTime", + "topMatches" + ], + "members":{ + "allMatchesCount":{ + "shape":"Integer", + "documentation":"

How many different places in the profile graph triggered a match.

" + }, + "allMatchesSum":{ + "shape":"Double", + "documentation":"

How much of the total sample count is potentially affected.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

End time of the profile that was used by this analysis. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "pattern":{ + "shape":"Pattern", + "documentation":"

The pattern that analysis recognized in the profile to make this recommendation.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the profile that was used by this analysis. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "topMatches":{ + "shape":"Matches", + "documentation":"

List of the matches with most impact.

" + } + }, + "documentation":"

A potential improvement that was found from analyzing the profiling data.

" + }, + "Recommendations":{ + "type":"list", + "member":{"shape":"Recommendation"} + }, + "RemoveNotificationChannelRequest":{ + "type":"structure", + "required":[ + "channelId", + "profilingGroupName" + ], + "members":{ + "channelId":{ + "shape":"ChannelId", + "documentation":"

The id of the channel that we want to stop receiving notifications from.

", + "location":"uri", + "locationName":"channelId" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group we want to change notification configuration for.

", + "location":"uri", + "locationName":"profilingGroupName" + } + }, + "documentation":"

The structure representing the RemoveNotificationChannelRequest.

" + }, + "RemoveNotificationChannelResponse":{ + "type":"structure", + "members":{ + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

The new notification configuration for this profiling group.

" + } + }, + "documentation":"

The structure representing the RemoveNotificationChannelResponse.

" + }, + "RemovePermissionRequest":{ + "type":"structure", + "required":[ + "actionGroup", + "profilingGroupName", + "revisionId" + ], + "members":{ + "actionGroup":{ + "shape":"ActionGroup", + "documentation":"

Specifies an action group that contains the permissions to remove from a profiling group's resource-based policy. One action group is supported, agentPermissions, which grants ConfigureAgent and PostAgentProfile permissions.

", + "location":"uri", + "locationName":"actionGroup" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

A universally unique identifier (UUID) for the revision of the resource-based policy from which you want to remove permissions.

", + "location":"querystring", + "locationName":"revisionId" + } + }, + "documentation":"

 The structure representing the <code>removePermissionRequest</code>. 
" + }, + "RemovePermissionResponse":{ + "type":"structure", + "required":[ + "policy", + "revisionId" + ], + "members":{ + "policy":{ + "shape":"String", + "documentation":"

The JSON-formatted resource-based policy on the profiling group after the specified permissions were removed.

" + }, + "revisionId":{ + "shape":"RevisionId", + "documentation":"

A universally unique identifier (UUID) for the revision of the resource-based policy after the specified permissions were removed. The updated JSON-formatted policy is in the policy element of the response.

" } }, - "documentation":"

Information about the profiling status.

" + "documentation":"

The structure representing the removePermissionResponse.

" }, "ResourceNotFoundException":{ "type":"structure", @@ -720,6 +1790,10 @@ }, "exception":true }, + "RevisionId":{ + "type":"string", + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["message"], @@ -734,6 +1808,59 @@ "exception":true }, "String":{"type":"string"}, + "Strings":{ + "type":"list", + "member":{"shape":"String"} + }, + "SubmitFeedbackRequest":{ + "type":"structure", + "required":[ + "anomalyInstanceId", + "profilingGroupName", + "type" + ], + "members":{ + "anomalyInstanceId":{ + "shape":"AnomalyInstanceId", + "documentation":"

The universally unique identifier (UUID) of the AnomalyInstance object that is included in the analysis data.

", + "location":"uri", + "locationName":"anomalyInstanceId" + }, + "comment":{ + "shape":"String", + "documentation":"

Optional feedback about this anomaly.

" + }, + "profilingGroupName":{ + "shape":"ProfilingGroupName", + "documentation":"

The name of the profiling group that is associated with the analysis data.

", + "location":"uri", + "locationName":"profilingGroupName" + }, + "type":{ + "shape":"FeedbackType", + "documentation":"

The feedback type. There are two valid values, Positive and Negative.

" + } + }, + "documentation":"

The structure representing the SubmitFeedbackRequest.

" + }, + "SubmitFeedbackResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

The structure representing the SubmitFeedbackResponse.

" + }, + "TargetFrame":{ + "type":"list", + "member":{"shape":"String"} + }, + "TargetFrames":{ + "type":"list", + "member":{"shape":"TargetFrame"} + }, + "ThreadStates":{ + "type":"list", + "member":{"shape":"String"} + }, "ThrottlingException":{ "type":"structure", "required":["message"], @@ -751,6 +1878,22 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "TimestampStructure":{ + "type":"structure", + "required":["value"], + "members":{ + "value":{ + "shape":"Timestamp", + "documentation":"

A Timestamp. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + } + }, + "documentation":"

A data type that contains a Timestamp object. This is specified using the ISO 8601 format. For example, 2020-06-01T13:15:02.001Z represents 1 millisecond past June 1, 2020 1:15:02 PM UTC.

" + }, + "UnprocessedEndTimeMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"ListOfTimestamps"} + }, "UpdateProfilingGroupRequest":{ "type":"structure", "required":[ @@ -760,7 +1903,7 @@ "members":{ "agentOrchestrationConfig":{ "shape":"AgentOrchestrationConfig", - "documentation":"

" + "documentation":"

Specifies whether profiling is enabled or disabled for a profiling group.

" }, "profilingGroupName":{ "shape":"ProfilingGroupName", @@ -777,12 +1920,23 @@ "members":{ "profilingGroup":{ "shape":"ProfilingGroupDescription", - "documentation":"

Updated information about the profiling group.

" + "documentation":"

A ProfilingGroupDescription that contains information about the returned updated profiling group.

" } }, "documentation":"

The structure representing the updateProfilingGroupResponse.

", "payload":"profilingGroup" }, + "UserFeedback":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"FeedbackType", + "documentation":"

Optional Positive or Negative feedback submitted by the user about whether the recommendation is useful or not.

" + } + }, + "documentation":"

Feedback that can be submitted for each instance of an anomaly by the user. Feedback is used for improvements in generating recommendations for the application.

" + }, "ValidationException":{ "type":"structure", "required":["message"], @@ -797,5 +1951,5 @@ "exception":true } }, - "documentation":"

This section provides documentation for the Amazon CodeGuru Profiler API operations.

" + "documentation":"

This section provides documentation for the Amazon CodeGuru Profiler API operations.

 <p>Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks. </p> <p>Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization. </p> <note> <p>Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages.</p> </note> <p> For more information, see <a href="https://docs.aws.amazon.com/codeguru/latest/profiler-ug/what-is-codeguru-profiler.html">What is Amazon CodeGuru Profiler</a> in the <i>Amazon CodeGuru Profiler User Guide</i>. </p> 
" } diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 10e31f186566..eaa3eb9aa98c 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json b/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json index bbc1f584fdd7..a9c76f5b737f 100644 --- a/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codegurureviewer/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,20 @@ { "pagination": { + "ListCodeReviews": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendationFeedback": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRecommendations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListRepositoryAssociations": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json index f7c36673e07a..02c7e0dde873 100644 --- a/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json +++ b/services/codegurureviewer/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,41 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Associates an AWS CodeCommit repository with Amazon CodeGuru Reviewer. When you associate an AWS CodeCommit repository with Amazon CodeGuru Reviewer, Amazon CodeGuru Reviewer will provide recommendations for each pull request. You can view recommendations in the AWS CodeCommit repository.

You can associate a GitHub repository using the Amazon CodeGuru Reviewer console.

" + "documentation":"

Use to associate an AWS CodeCommit repository or a repository managed by AWS CodeStar Connections with Amazon CodeGuru Reviewer. When you associate a repository, CodeGuru Reviewer reviews source code changes in the repository's pull requests and provides automatic recommendations. You can view recommendations using the CodeGuru Reviewer console. For more information, see Recommendations in Amazon CodeGuru Reviewer in the Amazon CodeGuru Reviewer User Guide.

If you associate a CodeCommit repository, it must be in the same AWS Region and AWS account where its CodeGuru Reviewer code reviews are configured.

Bitbucket and GitHub Enterprise Server repositories are managed by AWS CodeStar Connections to connect to CodeGuru Reviewer. For more information, see Connect to a repository source provider in the Amazon CodeGuru Reviewer User Guide.

You cannot use the CodeGuru Reviewer SDK or the AWS CLI to associate a GitHub repository with Amazon CodeGuru Reviewer. To associate a GitHub repository, use the console. For more information, see Getting started with CodeGuru Reviewer in the CodeGuru Reviewer User Guide.

" + }, + "DescribeCodeReview":{ + "name":"DescribeCodeReview", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}" + }, + "input":{"shape":"DescribeCodeReviewRequest"}, + "output":{"shape":"DescribeCodeReviewResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the metadata associated with the code review along with its status.

" + }, + "DescribeRecommendationFeedback":{ + "name":"DescribeRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}" + }, + "input":{"shape":"DescribeRecommendationFeedbackRequest"}, + "output":{"shape":"DescribeRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes the customer feedback for a CodeGuru Reviewer recommendation.

" }, "DescribeRepositoryAssociation":{ "name":"DescribeRepositoryAssociation", @@ -45,7 +79,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Describes a repository association.

" + "documentation":"

Returns a RepositoryAssociation object that contains information about the requested repository association.

" }, "DisassociateRepository":{ "name":"DisassociateRepository", @@ -65,6 +99,56 @@ ], "documentation":"

Removes the association between Amazon CodeGuru Reviewer and a repository.

" }, + "ListCodeReviews":{ + "name":"ListCodeReviews", + "http":{ + "method":"GET", + "requestUri":"/codereviews" + }, + "input":{"shape":"ListCodeReviewsRequest"}, + "output":{"shape":"ListCodeReviewsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all the code reviews that the customer has created in the past 90 days.

" + }, + "ListRecommendationFeedback":{ + "name":"ListRecommendationFeedback", + "http":{ + "method":"GET", + "requestUri":"/feedback/{CodeReviewArn}/RecommendationFeedback" + }, + "input":{"shape":"ListRecommendationFeedbackRequest"}, + "output":{"shape":"ListRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of RecommendationFeedbackSummary objects that contain customer recommendation feedback for all CodeGuru Reviewer users.

" + }, + "ListRecommendations":{ + "name":"ListRecommendations", + "http":{ + "method":"GET", + "requestUri":"/codereviews/{CodeReviewArn}/Recommendations" + }, + "input":{"shape":"ListRecommendationsRequest"}, + "output":{"shape":"ListRecommendationsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns the list of all recommendations for a completed code review.

" + }, "ListRepositoryAssociations":{ "name":"ListRepositoryAssociations", "http":{ @@ -78,7 +162,24 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists repository associations. You can optionally filter on one or more of the following recommendation properties: provider types, states, names, and owners.

" + "documentation":"

Returns a list of RepositoryAssociationSummary objects that contain summary information about a repository association. You can filter the returned list by ProviderType, Name, State, and Owner.

" + }, + "PutRecommendationFeedback":{ + "name":"PutRecommendationFeedback", + "http":{ + "method":"PUT", + "requestUri":"/feedback" + }, + "input":{"shape":"PutRecommendationFeedbackRequest"}, + "output":{"shape":"PutRecommendationFeedbackResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Stores customer feedback for a CodeGuru Reviewer recommendation. When this API is called again with different reactions the previous feedback is overwritten.

" } }, "shapes":{ @@ -95,7 +196,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:[a-z]+:[\\w-]+$" + "pattern":"^arn:aws[^:\\s]*:codeguru-reviewer:[^:\\s]+:[\\d]{12}:[a-z-]+:[\\w-]+$" }, "AssociateRepositoryRequest":{ "type":"structure", @@ -107,7 +208,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

If you want to add a new repository association, this parameter specifies a unique identifier for the new repository association that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically only need to interact with this value if you implement your own retry logic and want to ensure that a given repository association is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified repository association.

Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

To add a new repository association, this parameter specifies a unique identifier for the new repository association that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDKs to call this operation, you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Amazon CodeGuru Reviewer service endpoint, you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You typically interact with this value if you implement your own retry logic and want to ensure that a given repository association is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified repository association.

Amazon CodeGuru Reviewer uses this value to prevent the accidental creation of duplicate repository associations if there are failures and retries.

", "idempotencyToken":true } } @@ -138,10 +239,141 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The name of the AWS CodeCommit repository.

" + "documentation":"

The name of the AWS CodeCommit repository. For more information, see repositoryName in the AWS CodeCommit API Reference.

" } }, - "documentation":"

Information about an AWS CodeCommit repository.

" + "documentation":"

Information about an AWS CodeCommit repository. The CodeCommit repository must be in the same AWS Region and AWS account where its CodeGuru Reviewer code reviews are configured.

" + }, + "CodeReview":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the code review.

" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

" + }, + "RepositoryName":{ + "shape":"Name", + "documentation":"

The name of the repository.

" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub or Bitbucket repository, this is the username for the account that owns the repository.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The type of repository that contains the reviewed code (for example, GitHub or Bitbucket).

" + }, + "State":{ + "shape":"JobState", + "documentation":"

The valid code review states are:

  • Completed: The code review is complete.

  • Pending: The code review started and has not completed or failed.

  • Failed: The code review failed.

  • Deleting: The code review is being deleted.

" + }, + "StateReason":{ + "shape":"StateReason", + "documentation":"

The reason for the state of the code review.

" + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time, in milliseconds since the epoch, when the code review was created.

" + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time, in milliseconds since the epoch, when the code review was last updated.

" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of code review.

" + }, + "PullRequestId":{ + "shape":"PullRequestId", + "documentation":"

The pull request ID for the code review.

" + }, + "SourceCodeType":{ + "shape":"SourceCodeType", + "documentation":"

The type of the source code for the code review.

" + }, + "Metrics":{ + "shape":"Metrics", + "documentation":"

The statistics from the code review.

" + } + }, + "documentation":"

Information about a code review.

" + }, + "CodeReviewSummaries":{ + "type":"list", + "member":{"shape":"CodeReviewSummary"} + }, + "CodeReviewSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the code review.

" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

" + }, + "RepositoryName":{ + "shape":"Name", + "documentation":"

The name of the repository.

" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub or Bitbucket repository, this is the username for the account that owns the repository.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The provider type of the repository association.

" + }, + "State":{ + "shape":"JobState", + "documentation":"

The state of the code review.

The valid code review states are:

  • Completed: The code review is complete.

  • Pending: The code review started and has not completed or failed.

  • Failed: The code review failed.

  • Deleting: The code review is being deleted.

" + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time, in milliseconds since the epoch, when the code review was created.

" + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time, in milliseconds since the epoch, when the code review was last updated.

" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of the code review.

" + }, + "PullRequestId":{ + "shape":"PullRequestId", + "documentation":"

The pull request ID for the code review.

" + }, + "MetricsSummary":{ + "shape":"MetricsSummary", + "documentation":"

The statistics from the code review.

" + } + }, + "documentation":"

Information about the summary of the code review.

" + }, + "CommitDiffSourceCodeType":{ + "type":"structure", + "members":{ + "SourceCommit":{ + "shape":"CommitId", + "documentation":"

The SHA of the source commit.

" + }, + "DestinationCommit":{ + "shape":"CommitId", + "documentation":"

The SHA of the destination commit.

" + } + }, + "documentation":"

The commit diff for the pull request.

" + }, + "CommitId":{ + "type":"string", + "max":64, + "min":6 }, "ConflictException":{ "type":"structure", @@ -152,13 +384,76 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ConnectionArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:.+:.+:[0-9]{12}:.+" + }, + "DescribeCodeReviewRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "DescribeCodeReviewResponse":{ + "type":"structure", + "members":{ + "CodeReview":{ + "shape":"CodeReview", + "documentation":"

Information about the code review.

" + } + } + }, + "DescribeRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId" + ], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

", + "location":"querystring", + "locationName":"RecommendationId" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

Optional parameter to describe the feedback for a given user. If this is not supplied, it defaults to the user making the request.

The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

", + "location":"querystring", + "locationName":"UserId" + } + } + }, + "DescribeRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedback":{ + "shape":"RecommendationFeedback", + "documentation":"

The recommendation feedback given by the user.

" + } + } + }, "DescribeRepositoryAssociationRequest":{ "type":"structure", "required":["AssociationArn"], "members":{ "AssociationArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) identifying the association.

", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object. You can retrieve this ARN by calling ListRepositoryAssociations.

", "location":"uri", "locationName":"AssociationArn" } @@ -179,7 +474,7 @@ "members":{ "AssociationArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) identifying the association.

", + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object.

", "location":"uri", "locationName":"AssociationArn" } @@ -195,6 +490,12 @@ } }, "ErrorMessage":{"type":"string"}, + "FilePath":{ + "type":"string", + "max":1024, + "min":1 + }, + "FindingsCount":{"type":"long"}, "InternalServerException":{ "type":"structure", "members":{ @@ -205,6 +506,168 @@ "exception":true, "fault":true }, + "JobState":{ + "type":"string", + "enum":[ + "Completed", + "Pending", + "Failed", + "Deleting" + ] + }, + "JobStates":{ + "type":"list", + "member":{"shape":"JobState"}, + "max":3, + "min":1 + }, + "LineNumber":{"type":"integer"}, + "ListCodeReviewsMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListCodeReviewsRequest":{ + "type":"structure", + "required":["Type"], + "members":{ + "ProviderTypes":{ + "shape":"ProviderTypes", + "documentation":"

List of provider types for filtering that needs to be applied before displaying the result. For example, providerTypes=[GitHub] lists code reviews from GitHub.

", + "location":"querystring", + "locationName":"ProviderTypes" + }, + "States":{ + "shape":"JobStates", + "documentation":"

List of states for filtering that needs to be applied before displaying the result. For example, states=[Pending] lists code reviews in the Pending state.

The valid code review states are:

  • Completed: The code review is complete.

  • Pending: The code review started and has not completed or failed.

  • Failed: The code review failed.

  • Deleting: The code review is being deleted.

", + "location":"querystring", + "locationName":"States" + }, + "RepositoryNames":{ + "shape":"RepositoryNames", + "documentation":"

List of repository names for filtering that needs to be applied before displaying the result.

", + "location":"querystring", + "locationName":"RepositoryNames" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of code reviews to list in the response.

", + "location":"querystring", + "locationName":"Type" + }, + "MaxResults":{ + "shape":"ListCodeReviewsMaxResults", + "documentation":"

The maximum number of results that are returned per call. The default is 100.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListCodeReviewsResponse":{ + "type":"structure", + "members":{ + "CodeReviewSummaries":{ + "shape":"CodeReviewSummaries", + "documentation":"

A list of code reviews that meet the criteria of the request.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Pagination token.

" + } + } + }, + "ListRecommendationFeedbackRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that are returned per call. The default is 100.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

", + "location":"uri", + "locationName":"CodeReviewArn" + }, + "UserIds":{ + "shape":"UserIds", + "documentation":"

An AWS user's account ID or Amazon Resource Name (ARN). Use this ID to query the recommendation feedback for a code review from that user.

The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

", + "location":"querystring", + "locationName":"UserIds" + }, + "RecommendationIds":{ + "shape":"RecommendationIds", + "documentation":"

Used to query the recommendation feedback for a given recommendation.

", + "location":"querystring", + "locationName":"RecommendationIds" + } + } + }, + "ListRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + "RecommendationFeedbackSummaries":{ + "shape":"RecommendationFeedbackSummaries", + "documentation":"

Recommendation feedback summaries corresponding to the code review ARN.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "ListRecommendationsRequest":{ + "type":"structure", + "required":["CodeReviewArn"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

Pagination token.

", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that are returned per call. The default is 100.

", + "location":"querystring", + "locationName":"MaxResults" + }, + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

", + "location":"uri", + "locationName":"CodeReviewArn" + } + } + }, + "ListRecommendationsResponse":{ + "type":"structure", + "members":{ + "RecommendationSummaries":{ + "shape":"RecommendationSummaries", + "documentation":"

List of recommendations for the requested code review.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Pagination token.

" + } + } + }, "ListRepositoryAssociationsRequest":{ "type":"structure", "members":{ @@ -216,31 +679,31 @@ }, "States":{ "shape":"RepositoryAssociationStates", - "documentation":"

List of states to use as a filter.

", + "documentation":"

List of repository association states to use as a filter.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

", "location":"querystring", "locationName":"State" }, "Names":{ "shape":"Names", - "documentation":"

List of names to use as a filter.

", + "documentation":"

List of repository names to use as a filter.

", "location":"querystring", "locationName":"Name" }, "Owners":{ "shape":"Owners", - "documentation":"

List of owners to use as a filter. For AWS CodeCommit, the owner is the AWS account id. For GitHub, it is the GitHub account name.

", + "documentation":"

List of owners to use as a filter. For AWS CodeCommit, it is the name of the CodeCommit account that was used to associate the repository. For other repository source providers, such as Bitbucket, this is name of the account that was used to associate the repository.

", "location":"querystring", "locationName":"Owner" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of repository association results returned by ListRepositoryAssociations in paginated output. When this parameter is used, ListRepositoryAssociations only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRepositoryAssociations request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListRepositoryAssociations returns up to 100 results and a nextToken value if applicable.

", + "documentation":"

The maximum number of repository association results returned by ListRepositoryAssociations in paginated output. When this parameter is used, ListRepositoryAssociations only returns maxResults results in a single page with a nextToken response element. The remaining results of the initial request can be seen by sending another ListRepositoryAssociations request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, ListRepositoryAssociations returns up to 100 results and a nextToken value if applicable.

", "location":"querystring", "locationName":"MaxResults" }, "NextToken":{ "shape":"NextToken", - "documentation":"

The nextToken value returned from a previous paginated ListRepositoryAssociations request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "documentation":"

The nextToken value returned from a previous paginated ListRepositoryAssociations request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

Treat this token as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "location":"querystring", "locationName":"NextToken" } @@ -264,10 +727,40 @@ "max":100, "min":1 }, + "MeteredLinesOfCodeCount":{"type":"long"}, + "Metrics":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{ + "shape":"MeteredLinesOfCodeCount", + "documentation":"

Lines of code metered in the code review. For the initial code review pull request and all subsequent revisions, this includes all lines of code in the files added to the pull request. In subsequent revisions, for files that already existed in the pull request, this includes only the changed lines of code. In both cases, this does not include non-code lines such as comments and import statements. For example, if you submit a pull request containing 5 files, each with 500 lines of code, and in a subsequent revision you added a new file with 200 lines of code, and also modified a total of 25 lines across the initial 5 files, MeteredLinesOfCodeCount includes the first 5 files (5 * 500 = 2,500 lines), the new file (200 lines) and the 25 changed lines of code for a total of 2,725 lines of code.

" + }, + "FindingsCount":{ + "shape":"FindingsCount", + "documentation":"

Total number of recommendations found in the code review.

" + } + }, + "documentation":"

Information about the statistics from the code review.

" + }, + "MetricsSummary":{ + "type":"structure", + "members":{ + "MeteredLinesOfCodeCount":{ + "shape":"MeteredLinesOfCodeCount", + "documentation":"

Lines of code metered in the code review. For the initial code review pull request and all subsequent revisions, this includes all lines of code in the files added to the pull request. In subsequent revisions, for files that already existed in the pull request, this includes only the changed lines of code. In both cases, this does not include non-code lines such as comments and import statements. For example, if you submit a pull request containing 5 files, each with 500 lines of code, and in a subsequent revision you added a new file with 200 lines of code, and also modified a total of 25 lines across the initial 5 files, MeteredLinesOfCodeCount includes the first 5 files (5 * 500 = 2,500 lines), the new file (200 lines) and the 25 changed lines of code for a total of 2,725 lines of code.

" + }, + "FindingsCount":{ + "shape":"FindingsCount", + "documentation":"

Total number of recommendations found in the code review.

" + } + }, + "documentation":"

Information about metrics summaries.

" + }, "Name":{ "type":"string", "max":100, - "min":1 + "min":1, + "pattern":"^\\S[\\w.-]*$" }, "Names":{ "type":"list", @@ -292,7 +785,8 @@ "Owner":{ "type":"string", "max":100, - "min":1 + "min":1, + "pattern":"^\\S(.*\\S)?$" }, "Owners":{ "type":"list", @@ -304,7 +798,9 @@ "type":"string", "enum":[ "CodeCommit", - "GitHub" + "GitHub", + "Bitbucket", + "GitHubEnterpriseServer" ] }, "ProviderTypes":{ @@ -313,34 +809,184 @@ "max":3, "min":1 }, + "PullRequestId":{ + "type":"string", + "max":64, + "min":1 + }, + "PutRecommendationFeedbackRequest":{ + "type":"structure", + "required":[ + "CodeReviewArn", + "RecommendationId", + "Reactions" + ], + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

The recommendation ID that can be used to track the provided recommendations and then to collect the feedback.

" + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

List for storing reactions. Reactions are UTF-8 text codes for emojis. If you send an empty list, it clears all your feedback.

" + } + } + }, + "PutRecommendationFeedbackResponse":{ + "type":"structure", + "members":{ + } + }, + "Reaction":{ + "type":"string", + "enum":[ + "ThumbsUp", + "ThumbsDown" + ] + }, + "Reactions":{ + "type":"list", + "member":{"shape":"Reaction"}, + "max":1, + "min":0 + }, + "RecommendationFeedback":{ + "type":"structure", + "members":{ + "CodeReviewArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the CodeReview object.

" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

" + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

List for storing reactions. Reactions are UTF-8 text codes for emojis. You can send an empty list to clear all your feedback.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The ID of the user that made the API call.

The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

" + }, + "CreatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time at which the feedback was created.

" + }, + "LastUpdatedTimeStamp":{ + "shape":"TimeStamp", + "documentation":"

The time at which the feedback was last updated.

" + } + }, + "documentation":"

Information about the recommendation feedback.

" + }, + "RecommendationFeedbackSummaries":{ + "type":"list", + "member":{"shape":"RecommendationFeedbackSummary"} + }, + "RecommendationFeedbackSummary":{ + "type":"structure", + "members":{ + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

" + }, + "Reactions":{ + "shape":"Reactions", + "documentation":"

List for storing reactions. Reactions are utf-8 text code for emojis.

" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The ID of the user that gave the feedback.

The UserId is an IAM principal that can be specified as an AWS account ID or an Amazon Resource Name (ARN). For more information, see Specifying a Principal in the AWS Identity and Access Management User Guide.

" + } + }, + "documentation":"

Information about recommendation feedback summaries.

" + }, + "RecommendationId":{ + "type":"string", + "max":64, + "min":1 + }, + "RecommendationIds":{ + "type":"list", + "member":{"shape":"RecommendationId"}, + "max":100, + "min":1 + }, + "RecommendationSummaries":{ + "type":"list", + "member":{"shape":"RecommendationSummary"} + }, + "RecommendationSummary":{ + "type":"structure", + "members":{ + "FilePath":{ + "shape":"FilePath", + "documentation":"

Name of the file on which a recommendation is provided.

" + }, + "RecommendationId":{ + "shape":"RecommendationId", + "documentation":"

The recommendation ID that can be used to track the provided recommendations. Later on it can be used to collect the feedback.

" + }, + "StartLine":{ + "shape":"LineNumber", + "documentation":"

Start line from where the recommendation is applicable in the source commit or source branch.

" + }, + "EndLine":{ + "shape":"LineNumber", + "documentation":"

Last line where the recommendation is applicable in the source commit or source branch. For a single-line comment, the start line and end line values are the same.

" + }, + "Description":{ + "shape":"Text", + "documentation":"

A description of the recommendation generated by CodeGuru Reviewer for the lines of code between the start line and the end line.

" + } + }, + "documentation":"

Information about recommendations.

" + }, "Repository":{ "type":"structure", "members":{ "CodeCommit":{ "shape":"CodeCommitRepository", "documentation":"

Information about an AWS CodeCommit repository.

" + }, + "Bitbucket":{ + "shape":"ThirdPartySourceRepository", + "documentation":"

Information about a Bitbucket repository.

" + }, + "GitHubEnterpriseServer":{ + "shape":"ThirdPartySourceRepository", + "documentation":"

Information about a GitHub Enterprise Server repository.

" } }, - "documentation":"

Information about a repository.

" + "documentation":"

Information about an associated AWS CodeCommit repository or an associated repository that is managed by AWS CodeStar Connections (for example, Bitbucket). This Repository object is not used if your source code is in an associated GitHub repository.

" }, "RepositoryAssociation":{ "type":"structure", "members":{ "AssociationId":{ "shape":"AssociationId", - "documentation":"

The id of the repository association.

" + "documentation":"

The ID of the repository association.

" }, "AssociationArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) identifying the repository association.

" }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

" + }, "Name":{ "shape":"Name", "documentation":"

The name of the repository.

" }, "Owner":{ "shape":"Owner", - "documentation":"

The owner of the repository.

" + "documentation":"

The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub or Bitbucket repository, this is the username for the account that owns the repository.

" }, "ProviderType":{ "shape":"ProviderType", @@ -348,7 +994,7 @@ }, "State":{ "shape":"RepositoryAssociationState", - "documentation":"

The state of the repository association.

" + "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

" }, "StateReason":{ "shape":"StateReason", @@ -363,7 +1009,7 @@ "documentation":"

The time, in milliseconds since the epoch, when the repository association was created.

" } }, - "documentation":"

Information about a repository association.

" + "documentation":"

Information about a repository association. The DescribeRepositoryAssociation operation returns a RepositoryAssociation object.

" }, "RepositoryAssociationState":{ "type":"string", @@ -389,7 +1035,11 @@ "members":{ "AssociationArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) identifying the repository association.

" + "documentation":"

The Amazon Resource Name (ARN) of the RepositoryAssociation object.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

" }, "LastUpdatedTimeStamp":{ "shape":"TimeStamp", @@ -397,7 +1047,7 @@ }, "AssociationId":{ "shape":"AssociationId", - "documentation":"

The repository association ID.

" + "documentation":"

The repository association ID.

" }, "Name":{ "shape":"Name", @@ -405,7 +1055,7 @@ }, "Owner":{ "shape":"Owner", - "documentation":"

The owner of the repository association.

" + "documentation":"

The owner of the repository. For an AWS CodeCommit repository, this is the AWS account ID of the account that owns the repository. For a GitHub or Bitbucket repository, this is the username for the account that owns the repository.

" }, "ProviderType":{ "shape":"ProviderType", @@ -413,16 +1063,69 @@ }, "State":{ "shape":"RepositoryAssociationState", - "documentation":"

The state of the repository association.

Associated

Amazon CodeGuru Reviewer is associated with the repository.

Associating

The association is in progress.

Failed

The association failed. For more information about troubleshooting (or why it failed), see [troubleshooting topic].

Disassociating

Amazon CodeGuru Reviewer is in the process of disassociating with the repository.

" + "documentation":"

The state of the repository association.

The valid repository association states are:

  • Associated: The repository association is complete.

  • Associating: CodeGuru Reviewer is:

    • Setting up pull request notifications. This is required for pull requests to trigger a CodeGuru Reviewer review.

      If your repository ProviderType is GitHub or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your repository cannot be triggered.

    • Setting up source code access. This is required for CodeGuru Reviewer to securely clone code in your repository.

  • Failed: The repository failed to associate or disassociate.

  • Disassociating: CodeGuru Reviewer is removing the repository's pull request notifications and source code access.

" } }, - "documentation":"

Information about a repository association.

" + "documentation":"

Summary information about a repository association. The ListRepositoryAssociations operation returns a list of RepositoryAssociationSummary objects.

" + }, + "RepositoryNames":{ + "type":"list", + "member":{"shape":"Name"}, + "max":100, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource specified in the request was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SourceCodeType":{ + "type":"structure", + "members":{ + "CommitDiff":{ + "shape":"CommitDiffSourceCodeType", + "documentation":"

The commit diff for the pull request.

" + } + }, + "documentation":"

Information about the source code type.

" }, "StateReason":{ "type":"string", "max":256, "min":0 }, + "Text":{ + "type":"string", + "max":2048, + "min":1 + }, + "ThirdPartySourceRepository":{ + "type":"structure", + "required":[ + "Name", + "ConnectionArn", + "Owner" + ], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the third party source repository.

" + }, + "ConnectionArn":{ + "shape":"ConnectionArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS CodeStar Connections connection. Its format is arn:aws:codestar-connections:region-id:aws-account_id:connection/connection-id. For more information, see Connection in the AWS CodeStar Connections API Reference.

" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The owner of the repository. For a GitHub, GitHub Enterprise, or Bitbucket repository, this is the username for the account that owns the repository.

" + } + }, + "documentation":"

Information about a third-party source repository connected to CodeGuru Reviewer.

" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -433,6 +1136,21 @@ "exception":true }, "TimeStamp":{"type":"timestamp"}, + "Type":{ + "type":"string", + "enum":["PullRequest"] + }, + "UserId":{ + "type":"string", + "max":256, + "min":1 + }, + "UserIds":{ + "type":"list", + "member":{"shape":"UserId"}, + "max":100, + "min":1 + }, "ValidationException":{ "type":"structure", "members":{ @@ -443,5 +1161,5 @@ "exception":true } }, - "documentation":"

This section provides documentation for the Amazon CodeGuru Reviewer API operations.

" + "documentation":"

This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and recommends fixes in your Java code.

By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide.

" } diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index f1ea7654da39..a8fd8524f474 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 29499513b7ab..b7394bf53ff8 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index eb8111e5ff54..38c90e59551a 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json b/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json index deffa71f04db..1b3698862cab 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/paginators-1.json @@ -4,6 +4,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListHosts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json index 9116e8ecc3fe..01ec2ed423aa 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json +++ 
b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json @@ -22,10 +22,25 @@ "input":{"shape":"CreateConnectionInput"}, "output":{"shape":"CreateConnectionOutput"}, "errors":[ - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} ], "documentation":"

Creates a connection that can then be given to other AWS services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.

" }, + "CreateHost":{ + "name":"CreateHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHostInput"}, + "output":{"shape":"CreateHostOutput"}, + "errors":[ + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a resource that represents the infrastructure where a third-party provider is installed. The host is used when you create connections to an installed third-party provider type, such as GitHub Enterprise Server. You create one host for all connections to that provider.

A host created through the CLI or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by setting up the host in the console.

" + }, "DeleteConnection":{ "name":"DeleteConnection", "http":{ @@ -39,6 +54,20 @@ ], "documentation":"

The connection to be deleted.

" }, + "DeleteHost":{ + "name":"DeleteHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHostInput"}, + "output":{"shape":"DeleteHostOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

The host to be deleted. Before you delete a host, all connections associated to the host must be deleted.

A host cannot be deleted if it is in the VPC_CONFIG_INITIALIZING or VPC_CONFIG_DELETING state.

" + }, "GetConnection":{ "name":"GetConnection", "http":{ @@ -48,10 +77,24 @@ "input":{"shape":"GetConnectionInput"}, "output":{"shape":"GetConnectionOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"} ], "documentation":"

Returns the connection ARN and details such as status, owner, and provider type.

" }, + "GetHost":{ + "name":"GetHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHostInput"}, + "output":{"shape":"GetHostOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the host ARN and details such as status, provider type, endpoint, and, if applicable, the VPC configuration.

" + }, "ListConnections":{ "name":"ListConnections", "http":{ @@ -61,6 +104,56 @@ "input":{"shape":"ListConnectionsInput"}, "output":{"shape":"ListConnectionsOutput"}, "documentation":"

Lists the connections associated with your account.

" + }, + "ListHosts":{ + "name":"ListHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListHostsInput"}, + "output":{"shape":"ListHostsOutput"}, + "documentation":"

Lists the hosts associated with your account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets the set of key-value pairs (metadata) that are used to manage the resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes tags from an AWS resource.

" } }, "shapes":{ @@ -70,6 +163,11 @@ "min":12, "pattern":"[0-9]{12}" }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "Connection":{ "type":"structure", "members":{ @@ -83,18 +181,22 @@ }, "ProviderType":{ "shape":"ProviderType", - "documentation":"

The name of the external provider where your third-party code repository is configured. Currently, the valid provider type is Bitbucket.

" + "documentation":"

The name of the external provider where your third-party code repository is configured. The valid provider type is Bitbucket.

" }, "OwnerAccountId":{ "shape":"AccountId", - "documentation":"

The name of the external provider where your third-party code repository is configured. For Bitbucket, this is the account ID of the owner of the Bitbucket repository.

" + "documentation":"

The identifier of the external provider where your third-party code repository is configured. For Bitbucket, this is the account ID of the owner of the Bitbucket repository.

" }, "ConnectionStatus":{ "shape":"ConnectionStatus", "documentation":"

The current status of the connection.

" + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host associated with the connection.

" } }, - "documentation":"

The configuration that allows a service such as CodePipeline to connect to a third-party code repository.

" + "documentation":"

A resource that is used to connect third-party source providers with services like AWS CodePipeline.

Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.

" }, "ConnectionArn":{ "type":"string", @@ -121,18 +223,23 @@ }, "CreateConnectionInput":{ "type":"structure", - "required":[ - "ProviderType", - "ConnectionName" - ], + "required":["ConnectionName"], "members":{ "ProviderType":{ "shape":"ProviderType", - "documentation":"

The name of the external provider where your third-party code repository is configured. Currently, the valid provider type is Bitbucket.

" + "documentation":"

The name of the external provider where your third-party code repository is configured. The valid provider type is Bitbucket.

" }, "ConnectionName":{ "shape":"ConnectionName", "documentation":"

The name of the connection to be created. The name must be unique in the calling AWS account.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The key-value pair to use when tagging the resource.

" + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host associated with the connection to be created.

" } } }, @@ -143,6 +250,45 @@ "ConnectionArn":{ "shape":"ConnectionArn", "documentation":"

The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between AWS services.

The ARN is never reused if the connection is deleted.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Specifies the tags applied to the resource.

" + } + } + }, + "CreateHostInput":{ + "type":"structure", + "required":[ + "Name", + "ProviderType", + "ProviderEndpoint" + ], + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

The name of the host to be created. The name must be unique in the calling AWS account.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The name of the installed provider to be associated with your connection. The host resource represents the infrastructure where your provider type is installed. The valid provider type is GitHub Enterprise Server.

" + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

The endpoint of the infrastructure to be represented by the host after it is created.

" + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

The VPC configuration to be provisioned for the host. A VPC must be configured and the infrastructure to be represented by the host must already be connected to the VPC.

" + } + } + }, + "CreateHostOutput":{ + "type":"structure", + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host to be created.

" } } }, @@ -161,6 +307,21 @@ "members":{ } }, + "DeleteHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host to be deleted.

" + } + } + }, + "DeleteHostOutput":{ + "type":"structure", + "members":{ + } + }, "ErrorMessage":{ "type":"string", "max":600 @@ -184,6 +345,92 @@ } } }, + "GetHostInput":{ + "type":"structure", + "required":["HostArn"], + "members":{ + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the requested host.

" + } + } + }, + "GetHostOutput":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

The name of the requested host.

" + }, + "Status":{ + "shape":"HostStatus", + "documentation":"

The status of the requested host.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The provider type of the requested host, such as GitHub Enterprise Server.

" + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

The endpoint of the infrastructure represented by the requested host.

" + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

The VPC configuration of the requested host.

" + } + } + }, + "Host":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"HostName", + "documentation":"

The name of the host.

" + }, + "HostArn":{ + "shape":"HostArn", + "documentation":"

The Amazon Resource Name (ARN) of the host.

" + }, + "ProviderType":{ + "shape":"ProviderType", + "documentation":"

The name of the installed provider to be associated with your connection. The host resource represents the infrastructure where your provider type is installed. The valid provider type is GitHub Enterprise Server.

" + }, + "ProviderEndpoint":{ + "shape":"Url", + "documentation":"

The endpoint of the infrastructure where your provider type is installed.

" + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

The VPC configuration provisioned for the host.

" + }, + "Status":{ + "shape":"HostStatus", + "documentation":"

The status of the host, such as PENDING, AVAILABLE, VPC_CONFIG_DELETING, VPC_CONFIG_INITIALIZING, and VPC_CONFIG_FAILED_INITIALIZATION.

" + }, + "StatusMessage":{ + "shape":"HostStatusMessage", + "documentation":"

The status description for the host.

" + } + }, + "documentation":"

A resource that represents the infrastructure where a third-party provider is installed. The host is used when you create connections to an installed third-party provider type, such as GitHub Enterprise Server. You create one host for all connections to that provider.

A host created through the CLI or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by setting up the host in the console.

" + }, + "HostArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"arn:aws(-[\\w]+)*:codestar-connections:.+:[0-9]{12}:host\\/.+" + }, + "HostList":{ + "type":"list", + "member":{"shape":"Host"} + }, + "HostName":{ + "type":"string", + "max":32, + "min":1 + }, + "HostStatus":{"type":"string"}, + "HostStatusMessage":{"type":"string"}, "LimitExceededException":{ "type":"structure", "members":{ @@ -199,6 +446,10 @@ "shape":"ProviderType", "documentation":"

Filters the list of connections to those associated with a specified provider, such as Bitbucket.

" }, + "HostArnFilter":{ + "shape":"HostArn", + "documentation":"

Filters the list of connections to those associated with a specified host.

" + }, "MaxResults":{ "shape":"MaxResults", "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" @@ -222,20 +473,67 @@ } } }, + "ListHostsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token that was returned from the previous ListHosts call, which can be used to return the next set of hosts in the list.

" + } + } + }, + "ListHostsOutput":{ + "type":"structure", + "members":{ + "Hosts":{ + "shape":"HostList", + "documentation":"

A list of hosts and the details for each host, such as status, endpoint, and provider type.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that can be used in the next ListHosts call. To view all items in the list, continue to call this operation with each subsequent token until no more nextToken values are returned.

" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which you want to get information about tags, if any.

" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with the specified resource.

" + } + } + }, "MaxResults":{ "type":"integer", - "max":50, - "min":1 + "max":100, + "min":0 }, "NextToken":{ "type":"string", "max":1024, - "min":1, - "pattern":"[a-zA-Z0-9=\\-\\\\/]+" + "min":1 }, "ProviderType":{ "type":"string", - "enum":["Bitbucket"] + "enum":[ + "Bitbucket", + "GitHubEnterpriseServer" + ] }, "ResourceNotFoundException":{ "type":"structure", @@ -244,7 +542,160 @@ }, "documentation":"

Resource not found. Verify the connection resource ARN and try again.

", "exception":true + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The requested resource is unavailable. Verify the ARN for the host resource and try again.

", + "exception":true + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"sg-\\w{8}(\\w{9})?" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":10, + "min":1 + }, + "SubnetId":{ + "type":"string", + "pattern":"subnet-\\w{8}(\\w{9})?" + }, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":10, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The tag's key.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The tag's value.

" + } + }, + "documentation":"

A tag is a key-value pair that is used to manage the resource.

This tag is available for use by AWS services that support tags.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which you want to add or update tags.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags you want to modify or add to the resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TlsCertificate":{ + "type":"string", + "max":16384, + "min":1 + }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of keys for the tags to be removed from the resource.

" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":512, + "min":1 + }, + "VpcConfiguration":{ + "type":"structure", + "required":[ + "VpcId", + "SubnetIds", + "SecurityGroupIds" + ], + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

The ID of the Amazon VPC connected to the infrastructure where your provider type is installed.

" + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The ID of the subnet or subnets associated with the Amazon VPC connected to the infrastructure where your provider type is installed.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The ID of the security group or security groups associated with the Amazon VPC connected to the infrastructure where your provider type is installed.

" + }, + "TlsCertificate":{ + "shape":"TlsCertificate", + "documentation":"

The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed.

" + } + }, + "documentation":"

The VPC configuration provisioned for the host.

" + }, + "VpcId":{ + "type":"string", + "pattern":"vpc-\\w{8}(\\w{9})?" } }, - "documentation":"

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the Connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you create a connection, you can choose an existing installation or create one.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

For information about how to use AWS CodeStar Connections, see the AWS CodePipeline User Guide.

" + "documentation":"AWS CodeStar Connections

The CodeStar Connections feature is in preview release and is subject to change.

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you create a connection, you can choose an existing installation or create one.

When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

You can work with hosts by calling:

  • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

  • DeleteHost, which deletes the specified host.

  • GetHost, which returns information about the host, including the setup status.

  • ListHosts, which lists the hosts associated with your account.

You can work with tags in AWS CodeStar Connections by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.

  • TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.

  • UntagResource, which removes tags for a resource in AWS CodeStar Connections.

For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.

" } diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index cb03ac95e5e3..7b7c363f5194 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 76346109a014..63be7687eae4 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 1d567524beb0..948093593a62 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index 13af0ec55196..309708fce169 100755 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -284,6 +284,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"UserNotFoundException"}, {"shape":"AliasExistsException"}, + {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], "documentation":"

Links an existing user account in a user pool (DestinationUser) to an identity from an external identity provider (SourceUser) based on a specified attribute name and value from the external identity provider. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in, so that the federated user identity can be used to sign in as the existing user account.

For example, if there is an existing user with a username and password, this API links that user to a federated user identity, so that when the federated user identity is used, the user signs in as the existing user account.

Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external identity providers and provider attributes that have been trusted by the application owner.

See also .

This action is enabled only for admin access and requires developer credentials.

" @@ -1326,7 +1327,8 @@ {"shape":"InvalidSmsRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

Initiates the authentication flow.

" + "documentation":"

Initiates the authentication flow.

", + "authtype":"none" }, "ListDevices":{ "name":"ListDevices", @@ -1558,7 +1560,8 @@ {"shape":"InternalErrorException"}, {"shape":"SoftwareTokenMFANotFoundException"} ], - "documentation":"

Responds to the authentication challenge.

" + "documentation":"

Responds to the authentication challenge.

", + "authtype":"none" }, "SetRiskConfiguration":{ "name":"SetRiskConfiguration", @@ -3187,10 +3190,7 @@ "AuthParametersType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"AuthParametersValueType"} - }, - "AuthParametersValueType":{ - "type":"string", + "value":{"shape":"StringType"}, "sensitive":true }, "AuthenticationResultType":{ diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 56c8e673b0cd..340bada45afa 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index bc89b817f38a..77802fbdb909 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehend/src/main/resources/codegen-resources/service-2.json b/services/comprehend/src/main/resources/codegen-resources/service-2.json index a5607e55abf2..f01ceefb428e 100644 --- a/services/comprehend/src/main/resources/codegen-resources/service-2.json +++ b/services/comprehend/src/main/resources/codegen-resources/service-2.json @@ -131,7 +131,7 @@ {"shape":"KmsKeyValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new document classifier that you can use to categorize documents. To create a classifier you provide a set of training documents that labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see how-document-classification.

" + "documentation":"

Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that are labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see how-document-classification.

" }, "CreateEndpoint":{ "name":"CreateEndpoint", @@ -395,6 +395,7 @@ "output":{"shape":"DetectEntitiesResponse"}, "errors":[ {"shape":"InvalidRequestException"}, + {"shape":"ResourceUnavailableException"}, {"shape":"TextSizeLimitExceededException"}, {"shape":"UnsupportedLanguageException"}, {"shape":"InternalServerException"} @@ -874,7 +875,7 @@ "required":["TextList"], "members":{ "TextList":{ - "shape":"StringList", + "shape":"CustomerInputStringList", "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" } } @@ -894,7 +895,8 @@ "shape":"BatchItemErrorList", "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" } - } + }, + "sensitive":true }, "BatchDetectEntitiesItemResult":{ "type":"structure", @@ -918,7 +920,7 @@ ], "members":{ "TextList":{ - "shape":"StringList", + "shape":"CustomerInputStringList", "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -942,7 +944,8 @@ "shape":"BatchItemErrorList", "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" } - } + }, + "sensitive":true }, "BatchDetectKeyPhrasesItemResult":{ "type":"structure", @@ -966,7 +969,7 @@ ], "members":{ "TextList":{ - "shape":"StringList", + "shape":"CustomerInputStringList", "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -990,7 +993,8 @@ "shape":"BatchItemErrorList", "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" } - } + }, + "sensitive":true }, "BatchDetectSentimentItemResult":{ "type":"structure", @@ -1018,7 +1022,7 @@ ], "members":{ "TextList":{ - "shape":"StringList", + "shape":"CustomerInputStringList", "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -1042,7 +1046,8 @@ "shape":"BatchItemErrorList", "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" } - } + }, + "sensitive":true }, "BatchDetectSyntaxItemResult":{ "type":"structure", @@ -1066,7 +1071,7 @@ ], "members":{ "TextList":{ - "shape":"StringList", + "shape":"CustomerInputStringList", "documentation":"

A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -1090,7 +1095,8 @@ "shape":"BatchItemErrorList", "documentation":"

A list containing one object for each document that contained an error. The results are sorted in ascending order by the Index field and match the order of the documents in the input list. If there are no errors in the batch, the ErrorList is empty.

" } - } + }, + "sensitive":true }, "BatchItemError":{ "type":"structure", @@ -1173,14 +1179,15 @@ }, "NumberOfTestDocuments":{ "shape":"Integer", - "documentation":"

The number of documents in the input data that were used to test the classifier. Typically this is 10 to 20 percent of the input documents.

" + "documentation":"

The number of documents in the input data that were used to test the classifier. Typically this is 10 to 20 percent of the input documents, up to 10,000 documents.

" }, "EvaluationMetrics":{ "shape":"ClassifierEvaluationMetrics", "documentation":"

Describes the result metrics for the test data associated with a document classifier.

" } }, - "documentation":"

Provides information about a document classifier.

" + "documentation":"

Provides information about a document classifier.

", + "sensitive":true }, "ClassifyDocumentRequest":{ "type":"structure", @@ -1190,7 +1197,7 @@ ], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

The document text to be analyzed.

" }, "EndpointArn":{ @@ -1210,7 +1217,8 @@ "shape":"ListOfLabels", "documentation":"

The labels used for the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.

" } - } + }, + "sensitive":true }, "ClientRequestTokenString":{ "type":"string", @@ -1231,7 +1239,7 @@ "ComprehendEndpointArn":{ "type":"string", "max":256, - "pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier-endpoint|entity-recognizer-endpoint)/[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "ComprehendEndpointName":{ "type":"string", @@ -1241,7 +1249,7 @@ "ComprehendModelArn":{ "type":"string", "max":256, - "pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier|entity-recognizer)/[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "ConcurrentModificationException":{ "type":"structure", @@ -1405,6 +1413,16 @@ } } }, + "CustomerInputString":{ + "type":"string", + "min":1, + "sensitive":true + }, + "CustomerInputStringList":{ + "type":"list", + "member":{"shape":"CustomerInputString"}, + "sensitive":true + }, "DeleteDocumentClassifierRequest":{ "type":"structure", "required":["DocumentClassifierArn"], @@ -1626,7 +1644,7 @@ "required":["Text"], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

A UTF-8 text string. Each string should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" } } @@ -1638,22 +1656,24 @@ "shape":"ListOfDominantLanguages", "documentation":"

The languages that Amazon Comprehend detected in the input text. For each language, the response returns the RFC 5646 language code and the level of confidence that Amazon Comprehend has in the accuracy of its inference. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.

" } - } + }, + "sensitive":true }, "DetectEntitiesRequest":{ "type":"structure", - "required":[ - "Text", - "LanguageCode" - ], + "required":["Text"], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.

" + "documentation":"

The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language.

If your request includes the endpoint for a custom entity recognition model, Amazon Comprehend uses the language of your custom model, and it ignores any language code that you specify here.

" + }, + "EndpointArn":{ + "shape":"EntityRecognizerEndpointArn", + "documentation":"

The Amazon Resource Name of an endpoint that is associated with a custom entity recognition model. Provide an endpoint if you want to detect entities by using your own custom model instead of the default model that is used by Amazon Comprehend.

If you specify an endpoint, Amazon Comprehend uses the language of your custom model, and it ignores any language code that you provide in your request.

" } } }, @@ -1662,9 +1682,10 @@ "members":{ "Entities":{ "shape":"ListOfEntities", - "documentation":"

A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection. For a list of entity types, see how-entities.

" + "documentation":"

A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection.

If your request uses a custom entity recognition model, Amazon Comprehend detects the entities that the model is trained to recognize. Otherwise, it detects the default entity types. For a list of default entity types, see how-entities.

" } - } + }, + "sensitive":true }, "DetectKeyPhrasesRequest":{ "type":"structure", @@ -1674,7 +1695,7 @@ ], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -1690,7 +1711,8 @@ "shape":"ListOfKeyPhrases", "documentation":"

A collection of key phrases that Amazon Comprehend identified in the input text. For each key phrase, the response provides the text of the key phrase, where the key phrase begins and ends, and the level of confidence that Amazon Comprehend has in the accuracy of the detection.

" } - } + }, + "sensitive":true }, "DetectSentimentRequest":{ "type":"structure", @@ -1700,7 +1722,7 @@ ], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

A UTF-8 text string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -1720,7 +1742,8 @@ "shape":"SentimentScore", "documentation":"

An object that lists the sentiments, and their corresponding confidence levels.

" } - } + }, + "sensitive":true }, "DetectSyntaxRequest":{ "type":"structure", @@ -1730,7 +1753,7 @@ ], "members":{ "Text":{ - "shape":"String", + "shape":"CustomerInputString", "documentation":"

A UTF-8 string. Each string must contain fewer than 5,000 bytes of UTF-8 encoded characters.

" }, "LanguageCode":{ @@ -1746,7 +1769,8 @@ "shape":"ListOfSyntaxTokens", "documentation":"

A collection of syntax tokens describing the text. For each token, the response provides the text, the token type, where the text begins and ends, and the level of confidence that Amazon Comprehend has that the token is correct. For a list of token types, see how-syntax.

" } - } + }, + "sensitive":true }, "DocumentClass":{ "type":"structure", @@ -1775,11 +1799,11 @@ }, "SubmitTimeBefore":{ "shape":"Timestamp", - "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in ascending order, oldest to newest.

" + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in ascending order, oldest to newest.

" }, "SubmitTimeAfter":{ "shape":"Timestamp", - "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted before the specified time. Jobs are returned in descending order, newest to oldest.

" + "documentation":"

Filters the list of jobs based on the time that the job was submitted for processing. Returns only jobs submitted after the specified time. Jobs are returned in descending order, newest to oldest.

" } }, "documentation":"

Provides information for filtering a list of document classification jobs. For more information, see the operation. You can provide only one filter parameter in each request.

" @@ -2292,6 +2316,11 @@ }, "documentation":"

Describes the training documents submitted with an entity recognizer.

" }, + "EntityRecognizerEndpointArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, "EntityRecognizerEntityList":{ "type":"structure", "required":["S3Uri"], @@ -2385,7 +2414,8 @@ "documentation":"

Entity types from the metadata of an entity recognizer.

" } }, - "documentation":"

Detailed information about an entity recognizer.

" + "documentation":"

Detailed information about an entity recognizer.

", + "sensitive":true }, "EntityRecognizerMetadataEntityTypesList":{ "type":"list", @@ -3166,7 +3196,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified name is already in use. Use a different name and try your request again.

", + "documentation":"

The specified resource name is already in use. Use a different name and try your request again.

", "exception":true }, "ResourceLimitExceededException":{ @@ -3174,7 +3204,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The maximum number of recognizers per account has been exceeded. Review the recognizers, perform cleanup, and then try your request again.

", + "documentation":"

The maximum number of resources per account has been exceeded. Review the resources, and then try your request again.

", "exception":true }, "ResourceNotFoundException":{ @@ -3190,7 +3220,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified resource is not available. Check to see if the resource is in the TRAINED state and try your request again.

", + "documentation":"

The specified resource is not available. Check the resource and try your request again.

", "exception":true }, "S3Uri":{ @@ -3787,10 +3817,6 @@ "type":"string", "min":1 }, - "StringList":{ - "type":"list", - "member":{"shape":"String"} - }, "SubnetId":{ "type":"string", "max":32, diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index b517247a92f3..f80f08c6c9b0 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json b/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json index a5d180ef103b..d796a2caf543 100644 --- a/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json +++ b/services/comprehendmedical/src/main/resources/codegen-resources/service-2.json @@ -30,6 +30,22 @@ ], "documentation":"

Gets the properties associated with a medical entities detection job. Use this operation to get the status of a detection job.

" }, + "DescribeICD10CMInferenceJob":{ + "name":"DescribeICD10CMInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeICD10CMInferenceJobRequest"}, + "output":{"shape":"DescribeICD10CMInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with an InferICD10CM job. Use this operation to get the status of an inference job.

" + }, "DescribePHIDetectionJob":{ "name":"DescribePHIDetectionJob", "http":{ @@ -46,6 +62,22 @@ ], "documentation":"

Gets the properties associated with a protected health information (PHI) detection job. Use this operation to get the status of a detection job.

" }, + "DescribeRxNormInferenceJob":{ + "name":"DescribeRxNormInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRxNormInferenceJobRequest"}, + "output":{"shape":"DescribeRxNormInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets the properties associated with an InferRxNorm job. Use this operation to get the status of an inference job.

" + }, "DetectEntities":{ "name":"DetectEntities", "http":{ @@ -154,6 +186,22 @@ ], "documentation":"

Gets a list of medical entity detection jobs that you have submitted.

" }, + "ListICD10CMInferenceJobs":{ + "name":"ListICD10CMInferenceJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListICD10CMInferenceJobsRequest"}, + "output":{"shape":"ListICD10CMInferenceJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of InferICD10CM jobs that you have submitted.

" + }, "ListPHIDetectionJobs":{ "name":"ListPHIDetectionJobs", "http":{ @@ -170,6 +218,22 @@ ], "documentation":"

Gets a list of protected health information (PHI) detection jobs that you have submitted.

" }, + "ListRxNormInferenceJobs":{ + "name":"ListRxNormInferenceJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRxNormInferenceJobsRequest"}, + "output":{"shape":"ListRxNormInferenceJobsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of InferRxNorm jobs that you have submitted.

" + }, "StartEntitiesDetectionV2Job":{ "name":"StartEntitiesDetectionV2Job", "http":{ @@ -186,6 +250,22 @@ ], "documentation":"

Starts an asynchronous medical entity detection job for a collection of documents. Use the DescribeEntitiesDetectionV2Job operation to track the status of a job.

" }, + "StartICD10CMInferenceJob":{ + "name":"StartICD10CMInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartICD10CMInferenceJobRequest"}, + "output":{"shape":"StartICD10CMInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous job to detect medical conditions and link them to the ICD-10-CM ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a job.

" + }, "StartPHIDetectionJob":{ "name":"StartPHIDetectionJob", "http":{ @@ -202,6 +282,22 @@ ], "documentation":"

Starts an asynchronous job to detect protected health information (PHI). Use the DescribePHIDetectionJob operation to track the status of a job.

" }, + "StartRxNormInferenceJob":{ + "name":"StartRxNormInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartRxNormInferenceJobRequest"}, + "output":{"shape":"StartRxNormInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an asynchronous job to detect medication entities and link them to the RxNorm ontology. Use the DescribeRxNormInferenceJob operation to track the status of a job.

" + }, "StopEntitiesDetectionV2Job":{ "name":"StopEntitiesDetectionV2Job", "http":{ @@ -217,6 +313,21 @@ ], "documentation":"

Stops a medical entities detection job in progress.

" }, + "StopICD10CMInferenceJob":{ + "name":"StopICD10CMInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopICD10CMInferenceJobRequest"}, + "output":{"shape":"StopICD10CMInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an InferICD10CM inference job in progress.

" + }, "StopPHIDetectionJob":{ "name":"StopPHIDetectionJob", "http":{ @@ -231,6 +342,21 @@ {"shape":"InternalServerException"} ], "documentation":"

Stops a protected health information (PHI) detection job in progress.

" + }, + "StopRxNormInferenceJob":{ + "name":"StopRxNormInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopRxNormInferenceJobRequest"}, + "output":{"shape":"StopRxNormInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an InferRxNorm inference job in progress.

" } }, "shapes":{ @@ -412,6 +538,25 @@ } } }, + "DescribeICD10CMInferenceJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend Medical generated for the job. The StartICD10CMInferenceJob operation returns this identifier in its response.

" + } + } + }, + "DescribeICD10CMInferenceJobResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobProperties":{ + "shape":"ComprehendMedicalAsyncJobProperties", + "documentation":"

An object that contains the properties associated with a detection job.

" + } + } + }, "DescribePHIDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -431,6 +576,25 @@ } } }, + "DescribeRxNormInferenceJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier that Amazon Comprehend Medical generated for the job. The StartRxNormInferenceJob operation returns this identifier in its response.

" + } + } + }, + "DescribeRxNormInferenceJobResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobProperties":{ + "shape":"ComprehendMedicalAsyncJobProperties", + "documentation":"

An object that contains the properties associated with a detection job.

" + } + } + }, "DetectEntitiesRequest":{ "type":"structure", "required":["Text"], @@ -863,7 +1027,7 @@ "documentation":"

The path to the input data files in the S3 bucket.

" } }, - "documentation":"

The input properties for an entities detection job.

" + "documentation":"

The input properties for an entities detection job. This includes the name of the S3 bucket and the path to the files to be analyzed. See batch-manifest for more information.

" }, "Integer":{"type":"integer"}, "InternalServerException":{ @@ -955,6 +1119,36 @@ } } }, + "ListICD10CMInferenceJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"ComprehendMedicalAsyncJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListICD10CMInferenceJobsResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobPropertiesList":{ + "shape":"ComprehendMedicalAsyncJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, "ListPHIDetectionJobsRequest":{ "type":"structure", "members":{ @@ -985,6 +1179,36 @@ } } }, + "ListRxNormInferenceJobsRequest":{ + "type":"structure", + "members":{ + "Filter":{ + "shape":"ComprehendMedicalAsyncJobFilter", + "documentation":"

Filters the jobs that are returned. You can filter jobs based on their names, status, or the date and time that they were submitted. You can only set one filter at a time.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + }, + "MaxResults":{ + "shape":"MaxResultsInteger", + "documentation":"

The maximum number of results to return in each page. The default is 100.

" + } + } + }, + "ListRxNormInferenceJobsResponse":{ + "type":"structure", + "members":{ + "ComprehendMedicalAsyncJobPropertiesList":{ + "shape":"ComprehendMedicalAsyncJobPropertiesList", + "documentation":"

A list containing the properties of each job that is returned.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

Identifies the next page of results to return.

" + } + } + }, "ManifestFilePath":{ "type":"string", "max":4096, @@ -1035,7 +1259,8 @@ "ACUITY", "TEST_VALUE", "TEST_UNITS", - "DIRECTION" + "DIRECTION", + "SYSTEM_ORGAN_SITE" ] }, "ResourceNotFoundException":{ @@ -1273,6 +1498,55 @@ } } }, + "StartICD10CMInferenceJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.

", + "idempotencyToken":true + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. All documents must be in the same language.

" + } + } + }, + "StartICD10CMInferenceJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the DescribeICD10CMInferenceJob operation.

" + } + } + }, "StartPHIDetectionJobRequest":{ "type":"structure", "required":[ @@ -1322,6 +1596,55 @@ } } }, + "StartRxNormInferenceJobRequest":{ + "type":"structure", + "required":[ + "InputDataConfig", + "OutputDataConfig", + "DataAccessRoleArn", + "LanguageCode" + ], + "members":{ + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

Specifies the format and location of the input data for the job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

Specifies where to send the output files.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. For more information, see Role-Based Permissions Required for Asynchronous Operations.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The identifier of the job.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestTokenString", + "documentation":"

A unique identifier for the request. If you don't set the client request token, Amazon Comprehend Medical generates one.

", + "idempotencyToken":true + }, + "KMSKey":{ + "shape":"KMSKey", + "documentation":"

An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the input documents. All documents must be in the same language.

" + } + } + }, + "StartRxNormInferenceJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the job.

" + } + } + }, "StopEntitiesDetectionV2JobRequest":{ "type":"structure", "required":["JobId"], @@ -1341,6 +1664,25 @@ } } }, + "StopICD10CMInferenceJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the job.

" + } + } + }, + "StopICD10CMInferenceJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the DescribeICD10CMInferenceJob operation.

" + } + } + }, "StopPHIDetectionJobRequest":{ "type":"structure", "required":["JobId"], @@ -1360,6 +1702,25 @@ } } }, + "StopRxNormInferenceJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the job.

" + } + } + }, + "StopRxNormInferenceJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier generated for the job. To get the status of a job, use this identifier with the DescribeRxNormInferenceJob operation.

" + } + } + }, "String":{ "type":"string", "min":1 diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index eaf255112ba4..5b92b6895754 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 7a83d941dbde..9b447ed998c1 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,66 @@ "uid":"compute-optimizer-2019-11-01" }, "operations":{ + "DescribeRecommendationExportJobs":{ + "name":"DescribeRecommendationExportJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRecommendationExportJobsRequest"}, + "output":{"shape":"DescribeRecommendationExportJobsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes recommendation export jobs created in the last seven days.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations. Then use the DescribeRecommendationExportJobs action to view your export jobs.

" + }, + "ExportAutoScalingGroupRecommendations":{ + "name":"ExportAutoScalingGroupRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportAutoScalingGroupRecommendationsRequest"}, + "output":{"shape":"ExportAutoScalingGroupRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports optimization recommendations for Auto Scaling groups.

Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one Auto Scaling group export job in progress per AWS Region.

" + }, + "ExportEC2InstanceRecommendations":{ + "name":"ExportEC2InstanceRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportEC2InstanceRecommendationsRequest"}, + "output":{"shape":"ExportEC2InstanceRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Exports optimization recommendations for Amazon EC2 instances.

Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

You can have only one Amazon EC2 instance export job in progress per AWS Region.

" + }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", "http":{ @@ -89,7 +149,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this operation also confirms the enrollment status of member accounts within the organization.

" + "documentation":"

Returns the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is the master account of an organization, this action also confirms the enrollment status of member accounts within the organization.

" }, "GetRecommendationSummaries":{ "name":"GetRecommendationSummaries", @@ -126,7 +186,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this operation can also enroll member accounts within the organization.

" + "documentation":"

Updates the enrollment (opt in) status of an account to the AWS Compute Optimizer service.

If the account is a master account of an organization, this action can also be used to enroll member accounts within the organization.

" } }, "shapes":{ @@ -245,19 +305,238 @@ "member":{"shape":"AutoScalingGroupRecommendation"} }, "Code":{"type":"string"}, + "CreationTimestamp":{"type":"timestamp"}, "CurrentInstanceType":{"type":"string"}, + "DescribeRecommendationExportJobsRequest":{ + "type":"structure", + "members":{ + "jobIds":{ + "shape":"JobIds", + "documentation":"

The identification numbers of the export jobs to return.

An export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.

All export jobs created in the last seven days are returned if this parameter is omitted.

" + }, + "filters":{ + "shape":"JobFilters", + "documentation":"

An array of objects that describe a filter to return a more specific list of export jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to advance to the next page of export jobs.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of export jobs to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" + } + } + }, + "DescribeRecommendationExportJobsResponse":{ + "type":"structure", + "members":{ + "recommendationExportJobs":{ + "shape":"RecommendationExportJobs", + "documentation":"

An array of objects that describe recommendation export jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to advance to the next page of export jobs.

This value is null when there are no more pages of export jobs to return.

" + } + } + }, "DesiredCapacity":{"type":"integer"}, + "DestinationBucket":{"type":"string"}, + "DestinationKey":{"type":"string"}, + "DestinationKeyPrefix":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "ExportAutoScalingGroupRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The IDs of the AWS accounts for which to export Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + }, + "filters":{ + "shape":"Filters", + "documentation":"

An array of objects that describe a filter to export a more specific set of Auto Scaling group recommendations.

" + }, + "fieldsToExport":{ + "shape":"ExportableAutoScalingGroupFields", + "documentation":"

The recommendations data to include in the export file.

" + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

The format of the export file.

The only export file format currently supported is Csv.

" + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + } + } + }, + "ExportAutoScalingGroupRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

" + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon S3 bucket of a recommendations export file.

" + } + } + }, + "ExportDestination":{ + "type":"structure", + "members":{ + "s3":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

" + } + }, + "documentation":"

Describes the destination of the recommendations export and metadata files.

" + }, + "ExportEC2InstanceRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

The IDs of the AWS accounts for which to export instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to export recommendations.

This parameter cannot be specified together with the include member accounts parameter. The parameters are mutually exclusive.

Recommendations for member accounts are not included in the export if this parameter, or the include member accounts parameter, is omitted.

You can specify multiple account IDs per request.

" + }, + "filters":{ + "shape":"Filters", + "documentation":"

An array of objects that describe a filter to export a more specific set of instance recommendations.

" + }, + "fieldsToExport":{ + "shape":"ExportableInstanceFields", + "documentation":"

The recommendations data to include in the export file.

" + }, + "s3DestinationConfig":{ + "shape":"S3DestinationConfig", + "documentation":"

An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for the export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

The format of the export file.

The only export file format currently supported is Csv.

" + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the master account of an organization.

The member accounts must also be opted in to Compute Optimizer.

Recommendations for member accounts of the organization are not included in the export file if this parameter is omitted.

Recommendations for member accounts are not included in the export if this parameter, or the account IDs parameter, is omitted.

" + } + } + }, + "ExportEC2InstanceRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

Use the DescribeRecommendationExportJobs action, and specify the job ID to view the status of an export job.

" + }, + "s3Destination":{ + "shape":"S3Destination", + "documentation":"

An object that describes the destination Amazon S3 bucket of a recommendations export file.

" + } + } + }, + "ExportableAutoScalingGroupField":{ + "type":"string", + "enum":[ + "AccountId", + "AutoScalingGroupArn", + "AutoScalingGroupName", + "Finding", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "LookbackPeriodInDays", + "CurrentConfigurationInstanceType", + "CurrentConfigurationDesiredCapacity", + "CurrentConfigurationMinSize", + "CurrentConfigurationMaxSize", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsConfigurationInstanceType", + "RecommendationOptionsConfigurationDesiredCapacity", + "RecommendationOptionsConfigurationMinSize", + "RecommendationOptionsConfigurationMaxSize", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "LastRefreshTimestamp" + ] + }, + "ExportableAutoScalingGroupFields":{ + "type":"list", + "member":{"shape":"ExportableAutoScalingGroupField"} + }, + "ExportableInstanceField":{ + "type":"string", + "enum":[ + "AccountId", + "InstanceArn", + "InstanceName", + "Finding", + "LookbackPeriodInDays", + "CurrentInstanceType", + "UtilizationMetricsCpuMaximum", + "UtilizationMetricsMemoryMaximum", + "CurrentOnDemandPrice", + "CurrentStandardOneYearNoUpfrontReservedPrice", + "CurrentStandardThreeYearNoUpfrontReservedPrice", + "CurrentVCpus", + "CurrentMemory", + "CurrentStorage", + "CurrentNetwork", + "RecommendationOptionsInstanceType", + "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", + 
"RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + "RecommendationOptionsPerformanceRisk", + "RecommendationOptionsVcpus", + "RecommendationOptionsMemory", + "RecommendationOptionsStorage", + "RecommendationOptionsNetwork", + "RecommendationOptionsOnDemandPrice", + "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", + "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", + "RecommendationsSourcesRecommendationSourceArn", + "RecommendationsSourcesRecommendationSourceType", + "LastRefreshTimestamp" + ] + }, + "ExportableInstanceFields":{ + "type":"list", + "member":{"shape":"ExportableInstanceField"} + }, + "FailureReason":{"type":"string"}, + "FileFormat":{ + "type":"string", + "enum":["Csv"] + }, "Filter":{ "type":"structure", "members":{ "name":{ "shape":"FilterName", - "documentation":"

The name of the filter.

Specify Finding to filter the results to a specific findings classification.

Specify RecommendationSourceType to filter the results to a specific resource type.

" + "documentation":"

The name of the filter.

Specify Finding to return recommendations with a specific findings classification (e.g., Overprovisioned).

Specify RecommendationSourceType to return recommendations of a specific resource type (e.g., AutoScalingGroup).

" }, "values":{ "shape":"FilterValues", - "documentation":"

The value of the filter.

If you specify the name parameter as Finding, and you're recommendations for an instance, then the valid values are Underprovisioned, Overprovisioned, NotOptimized, or Optimized.

If you specify the name parameter as Finding, and you're recommendations for an Auto Scaling group, then the valid values are Optimized, or NotOptimized.

If you specify the name parameter as RecommendationSourceType, then the valid values are EC2Instance, or AutoScalingGroup.

" + "documentation":"

The value of the filter.

If you specify the name parameter as Finding, and you request recommendations for an instance, then the valid values are Underprovisioned, Overprovisioned, NotOptimized, or Optimized.

If you specify the name parameter as Finding, and you request recommendations for an Auto Scaling group, then the valid values are Optimized, or NotOptimized.

If you specify the name parameter as RecommendationSourceType, then the valid values are Ec2Instance, or AutoScalingGroup.

" } }, "documentation":"

Describes a filter that returns a more specific list of recommendations.

" @@ -292,7 +571,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return Auto Scaling group recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return Auto Scaling group recommendations.

Only one account ID can be specified per request.

" }, "autoScalingGroupArns":{ "shape":"AutoScalingGroupArns", @@ -304,7 +583,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of Auto Scaling group recommendations to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of Auto Scaling group recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" }, "filters":{ "shape":"Filters", @@ -342,7 +621,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of instance recommendations to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of instance recommendations to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" }, "filters":{ "shape":"Filters", @@ -350,7 +629,7 @@ }, "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return instance recommendations.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return instance recommendations.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return instance recommendations.

Only one account ID can be specified per request.

" } } }, @@ -461,7 +740,7 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

The AWS account IDs for which to return recommendation summaries.

Only one account ID can be specified per request.

" + "documentation":"

The IDs of the AWS accounts for which to return recommendation summaries.

If your account is the master account of an organization, use this parameter to specify the member accounts for which you want to return recommendation summaries.

Only one account ID can be specified per request.

" }, "nextToken":{ "shape":"NextToken", @@ -469,7 +748,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of recommendation summaries to return with a single call.

To retrieve the remaining results, make another call with the returned NextToken value.

" + "documentation":"

The maximum number of recommendation summaries to return with a single request.

To retrieve the remaining results, make another request with the returned NextToken value.

" } } }, @@ -503,7 +782,7 @@ }, "accountId":{ "shape":"AccountId", - "documentation":"

The AWS account ID of the instance recommendation.

" + "documentation":"

The AWS account ID of the instance.

" }, "instanceName":{ "shape":"InstanceName", @@ -572,7 +851,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The request processing has failed because of an unknown error, exception, or failure.

", + "documentation":"

An internal error has occurred. Try your call again.

", "exception":true, "fault":true }, @@ -585,7 +864,56 @@ "exception":true, "synthetic":true }, + "JobFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"JobFilterName", + "documentation":"

The name of the filter.

Specify ResourceType to return export jobs of a specific resource type (e.g., Ec2Instance).

Specify JobStatus to return export jobs with a specific status (e.g, Complete).

" + }, + "values":{ + "shape":"FilterValues", + "documentation":"

The value of the filter.

If you specify the name parameter as ResourceType, the valid values are Ec2Instance or AutoScalingGroup.

If you specify the name parameter as JobStatus, the valid values are Queued, InProgress, Complete, or Failed.

" + } + }, + "documentation":"

Describes a filter that returns a more specific list of recommendation export jobs.

This filter is used with the DescribeRecommendationExportJobs action.

" + }, + "JobFilterName":{ + "type":"string", + "enum":[ + "ResourceType", + "JobStatus" + ] + }, + "JobFilters":{ + "type":"list", + "member":{"shape":"JobFilter"} + }, + "JobId":{"type":"string"}, + "JobIds":{ + "type":"list", + "member":{"shape":"JobId"} + }, + "JobStatus":{ + "type":"string", + "enum":[ + "Queued", + "InProgress", + "Complete", + "Failed" + ] + }, "LastRefreshTimestamp":{"type":"timestamp"}, + "LastUpdatedTimestamp":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request exceeds a limit of the service.

", + "exception":true, + "synthetic":true + }, "LookBackPeriodInDays":{"type":"double"}, "MaxResults":{ "type":"integer", @@ -594,6 +922,7 @@ "MaxSize":{"type":"integer"}, "MemberAccountsEnrolled":{"type":"boolean"}, "Message":{"type":"string"}, + "MetadataKey":{"type":"string"}, "MetricName":{ "type":"string", "enum":[ @@ -629,7 +958,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

You must opt in to the service to perform this action.

", + "documentation":"

The account is not opted in to AWS Compute Optimizer.

", "exception":true, "synthetic":true }, @@ -666,6 +995,44 @@ "member":{"shape":"UtilizationMetric"} }, "Rank":{"type":"integer"}, + "RecommendationExportJob":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The identification number of the export job.

" + }, + "destination":{ + "shape":"ExportDestination", + "documentation":"

An object that describes the destination of the export file.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type of the exported recommendations.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The status of the export job.

" + }, + "creationTimestamp":{ + "shape":"CreationTimestamp", + "documentation":"

The timestamp of when the export job was created.

" + }, + "lastUpdatedTimestamp":{ + "shape":"LastUpdatedTimestamp", + "documentation":"

The timestamp of when the export job was last updated.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

The reason for an export job failure.

" + } + }, + "documentation":"

Describes a recommendation export job.

Use the DescribeRecommendationExportJobs action to view your recommendation export jobs.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations.

" + }, + "RecommendationExportJobs":{ + "type":"list", + "member":{"shape":"RecommendationExportJob"} + }, "RecommendationOptions":{ "type":"list", "member":{"shape":"InstanceRecommendationOption"} @@ -746,10 +1113,49 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The specified resource was not found.

", + "documentation":"

A resource that is required for the action doesn't exist.

", "exception":true, "synthetic":true }, + "ResourceType":{ + "type":"string", + "enum":[ + "Ec2Instance", + "AutoScalingGroup" + ] + }, + "S3Destination":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

The name of the Amazon S3 bucket used as the destination of an export file.

" + }, + "key":{ + "shape":"DestinationKey", + "documentation":"

The Amazon S3 bucket key of an export file.

The key uniquely identifies the object, or export file, in the S3 bucket.

" + }, + "metadataKey":{ + "shape":"MetadataKey", + "documentation":"

The Amazon S3 bucket key of a metadata file.

The key uniquely identifies the object, or metadata file, in the S3 bucket.

" + } + }, + "documentation":"

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and object keys of a recommendations export file, and its associated metadata file.

" + }, + "S3DestinationConfig":{ + "type":"structure", + "members":{ + "bucket":{ + "shape":"DestinationBucket", + "documentation":"

The name of the Amazon S3 bucket to use as the destination for an export job.

" + }, + "keyPrefix":{ + "shape":"DestinationKeyPrefix", + "documentation":"

The Amazon S3 bucket prefix for an export job.

" + } + }, + "documentation":"

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for a recommendations export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer user guide.

" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -794,7 +1200,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The limit on the number of requests per second was exceeded.

", + "documentation":"

The request was denied due to request throttling.

", "exception":true, "synthetic":true }, @@ -813,7 +1219,7 @@ }, "includeMemberAccounts":{ "shape":"IncludeMemberAccounts", - "documentation":"

Indicates whether to enroll member accounts within the organization, if the account is a master account of an organization.

" + "documentation":"

Indicates whether to enroll member accounts of the organization if your account is the master account of an organization.

" } } }, @@ -853,5 +1259,5 @@ "member":{"shape":"UtilizationMetric"} } }, - "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS resources, such as EC2 instances and Auto Scaling groups. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, see the AWS Compute Optimizer User Guide.

" + "documentation":"

AWS Compute Optimizer is a service that analyzes the configuration and utilization metrics of your AWS resources, such as EC2 instances and Auto Scaling groups. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, as well as projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the AWS Compute Optimizer User Guide.

" } diff --git a/services/config/pom.xml b/services/config/pom.xml index 450aa2eeb1b1..6315a47a57a3 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 96cff1f0da3c..cb8861a07c36 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index d9526c5a2608..251f6d000fc0 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a user account for the specified Amazon Connect instance.

" + "documentation":"

Creates a user account for the specified Amazon Connect instance.

For information about how to create user accounts using the Amazon Connect console, see Add Users in the Amazon Connect Administrator Guide.

" }, "DeleteUser":{ "name":"DeleteUser", @@ -46,7 +46,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Deletes a user account from the specified Amazon Connect instance.

" + "documentation":"

Deletes a user account from the specified Amazon Connect instance.

For information about what happens to a user's data when their account is deleted, see Delete Users from Your Amazon Connect Instance in the Amazon Connect Administrator Guide.

" }, "DescribeUser":{ "name":"DescribeUser", @@ -129,7 +129,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets the real-time metric data from the specified Amazon Connect instance.

For more information, see Real-time Metrics Reports in the Amazon Connect Administrator Guide.

" + "documentation":"

Gets the real-time metric data from the specified Amazon Connect instance.

For a description of each metric, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

" }, "GetFederationToken":{ "name":"GetFederationToken", @@ -164,7 +164,7 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets historical metric data from the specified Amazon Connect instance.

For more information, see Historical Metrics Reports in the Amazon Connect Administrator Guide.

" + "documentation":"

Gets historical metric data from the specified Amazon Connect instance.

For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

" }, "ListContactFlows":{ "name":"ListContactFlows", @@ -181,7 +181,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides information about the contact flows for the specified Amazon Connect instance.

" + "documentation":"

Provides information about the contact flows for the specified Amazon Connect instance.

For more information about contact flows, see Contact Flows in the Amazon Connect Administrator Guide.

" }, "ListHoursOfOperations":{ "name":"ListHoursOfOperations", @@ -198,7 +198,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides information about the hours of operation for the specified Amazon Connect instance.

" + "documentation":"

Provides information about the hours of operation for the specified Amazon Connect instance.

For more information about hours of operation, see Set the Hours of Operation for a Queue in the Amazon Connect Administrator Guide.

" }, "ListPhoneNumbers":{ "name":"ListPhoneNumbers", @@ -215,7 +215,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides information about the phone numbers for the specified Amazon Connect instance.

" + "documentation":"

Provides information about the phone numbers for the specified Amazon Connect instance.

For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

" }, "ListQueues":{ "name":"ListQueues", @@ -232,7 +232,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides information about the queues for the specified Amazon Connect instance.

" + "documentation":"

Provides information about the queues for the specified Amazon Connect instance.

For more information about queues, see Queues: Standard and Agent in the Amazon Connect Administrator Guide.

" }, "ListRoutingProfiles":{ "name":"ListRoutingProfiles", @@ -249,7 +249,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides summary information about the routing profiles for the specified Amazon Connect instance.

" + "documentation":"

Provides summary information about the routing profiles for the specified Amazon Connect instance.

For more information about routing profiles, see Routing Profiles and Create a Routing Profile in the Amazon Connect Administrator Guide.

" }, "ListSecurityProfiles":{ "name":"ListSecurityProfiles", @@ -266,7 +266,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides summary information about the security profiles for the specified Amazon Connect instance.

" + "documentation":"

Provides summary information about the security profiles for the specified Amazon Connect instance.

For more information about security profiles, see Security Profiles in the Amazon Connect Administrator Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -283,7 +283,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the tags for the specified resource.

" + "documentation":"

Lists the tags for the specified resource.

For sample policies that use tags, see Amazon Connect Identity-Based Policy Examples in the Amazon Connect Administrator Guide.

" }, "ListUserHierarchyGroups":{ "name":"ListUserHierarchyGroups", @@ -300,7 +300,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides summary information about the hierarchy groups for the specified Amazon Connect instance.

" + "documentation":"

Provides summary information about the hierarchy groups for the specified Amazon Connect instance.

For more information about agent hierarchies, see Set Up Agent Hierarchies in the Amazon Connect Administrator Guide.

" }, "ListUsers":{ "name":"ListUsers", @@ -334,7 +334,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Initiates a contact flow to start a new chat for the customer. Response of this API provides a token required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.

When a new chat contact is successfully created, clients need to subscribe to the participant’s connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.

" + "documentation":"

Initiates a contact flow to start a new chat for the customer. Response of this API provides a token required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.

When a new chat contact is successfully created, clients need to subscribe to the participant’s connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.

A 429 error occurs in two situations:

  • API rate limit is exceeded. API TPS throttling returns a TooManyRequests exception from the API Gateway.

  • The quota for concurrent active chats is exceeded. Active chat throttling returns a LimitExceededException.

For more information about how chat works, see Chat in the Amazon Connect Administrator Guide.

" }, "StartOutboundVoiceContact":{ "name":"StartOutboundVoiceContact", @@ -353,7 +353,7 @@ {"shape":"DestinationNotAllowedException"}, {"shape":"OutboundContactNotPermittedException"} ], - "documentation":"

Initiates a contact flow to place an outbound call to a customer.

There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

" + "documentation":"

This API places an outbound call to a contact, and then initiates the contact flow. It performs the actions in the contact flow that's specified (in ContactFlowId).

Agents are not involved in initiating the outbound API (that is, dialing the contact). If the contact flow places an outbound call to a contact, and then puts the contact in queue, that's when the call is routed to the agent, like any other inbound case.

There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

UK numbers with a 447 prefix are not allowed by default. Before you can dial these UK mobile numbers, you must submit a service quota increase request. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

" }, "StopContact":{ "name":"StopContact", @@ -386,7 +386,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds the specified tags to the specified resource.

The supported resource type is users.

" + "documentation":"

Adds the specified tags to the specified resource.

The supported resource type is users.

For sample policies that use tags, see Amazon Connect Identity-Based Policy Examples in the Amazon Connect Administrator Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -450,7 +450,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the identity information for the specified user.

" + "documentation":"

Updates the identity information for the specified user.

Someone with the ability to invoke UpdateUserIdentityInfo can change the login credentials of other users by changing their email address. This poses a security risk to your organization. They can change the email address of a user to the attacker's email address, and then reset the password through email. We strongly recommend limiting who has the ability to invoke UpdateUserIdentityInfo. For more information, see Best Practices for Security Profiles in the Amazon Connect Administrator Guide.

" }, "UpdateUserPhoneConfig":{ "name":"UpdateUserPhoneConfig", @@ -754,7 +754,7 @@ "documentation":"

The unit for the metric.

" } }, - "documentation":"

Contains information about a real-time metric.

" + "documentation":"

Contains information about a real-time metric. For a description of each metric, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

" }, "CurrentMetricData":{ "type":"structure", @@ -1028,7 +1028,7 @@ }, "CurrentMetrics":{ "shape":"CurrentMetrics", - "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available:

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

AGENTS_AVAILABLE

Unit: COUNT

AGENTS_ERROR

Unit: COUNT

AGENTS_NON_PRODUCTIVE

Unit: COUNT

AGENTS_ON_CALL

Unit: COUNT

AGENTS_ON_CONTACT

Unit: COUNT

AGENTS_ONLINE

Unit: COUNT

AGENTS_STAFFED

Unit: COUNT

CONTACTS_IN_QUEUE

Unit: COUNT

CONTACTS_SCHEDULED

Unit: COUNT

OLDEST_CONTACT_AGE

Unit: SECONDS

SLOTS_ACTIVE

Unit: COUNT

SLOTS_AVAILABLE

Unit: COUNT

" + "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" }, "NextToken":{ "shape":"NextToken", @@ -1113,7 +1113,7 @@ }, "HistoricalMetrics":{ "shape":"HistoricalMetrics", - "documentation":"

The metrics to retrieve. Specify the name, unit, and statistic for each metric. The following historical metrics are available:

ABANDON_TIME

Unit: SECONDS

Statistic: AVG

AFTER_CONTACT_WORK_TIME

Unit: SECONDS

Statistic: AVG

API_CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CALLBACK_CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CONTACTS_ABANDONED

Unit: COUNT

Statistic: SUM

CONTACTS_AGENT_HUNG_UP_FIRST

Unit: COUNT

Statistic: SUM

CONTACTS_CONSULTED

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED_INCOMING

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED_OUTBOUND

Unit: COUNT

Statistic: SUM

CONTACTS_HOLD_ABANDONS

Unit: COUNT

Statistic: SUM

CONTACTS_MISSED

Unit: COUNT

Statistic: SUM

CONTACTS_QUEUED

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_IN

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_IN_FROM_QUEUE

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_OUT

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: COUNT

Statistic: SUM

HANDLE_TIME

Unit: SECONDS

Statistic: AVG

HOLD_TIME

Unit: SECONDS

Statistic: AVG

INTERACTION_AND_HOLD_TIME

Unit: SECONDS

Statistic: AVG

INTERACTION_TIME

Unit: SECONDS

Statistic: AVG

OCCUPANCY

Unit: PERCENT

Statistic: AVG

QUEUE_ANSWER_TIME

Unit: SECONDS

Statistic: AVG

QUEUED_TIME

Unit: SECONDS

Statistic: MAX

SERVICE_LEVEL

Unit: PERCENT

Statistic: AVG

Threshold: Only \"Less than\" comparisons are supported, with the following service level thresholds: 15, 20, 25, 30, 45, 60, 90, 120, 180, 240, 300, 600

" + "documentation":"

The metrics to retrieve. Specify the name, unit, and statistic for each metric. The following historical metrics are available. For a description of each metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

ABANDON_TIME

Unit: SECONDS

Statistic: AVG

AFTER_CONTACT_WORK_TIME

Unit: SECONDS

Statistic: AVG

API_CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CALLBACK_CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CONTACTS_ABANDONED

Unit: COUNT

Statistic: SUM

CONTACTS_AGENT_HUNG_UP_FIRST

Unit: COUNT

Statistic: SUM

CONTACTS_CONSULTED

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED_INCOMING

Unit: COUNT

Statistic: SUM

CONTACTS_HANDLED_OUTBOUND

Unit: COUNT

Statistic: SUM

CONTACTS_HOLD_ABANDONS

Unit: COUNT

Statistic: SUM

CONTACTS_MISSED

Unit: COUNT

Statistic: SUM

CONTACTS_QUEUED

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_IN

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_IN_FROM_QUEUE

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_OUT

Unit: COUNT

Statistic: SUM

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: COUNT

Statistic: SUM

HANDLE_TIME

Unit: SECONDS

Statistic: AVG

HOLD_TIME

Unit: SECONDS

Statistic: AVG

INTERACTION_AND_HOLD_TIME

Unit: SECONDS

Statistic: AVG

INTERACTION_TIME

Unit: SECONDS

Statistic: AVG

OCCUPANCY

Unit: PERCENT

Statistic: AVG

QUEUE_ANSWER_TIME

Unit: SECONDS

Statistic: AVG

QUEUED_TIME

Unit: SECONDS

Statistic: MAX

SERVICE_LEVEL

Unit: PERCENT

Statistic: AVG

Threshold: Only \"Less than\" comparisons are supported, with the following service level thresholds: 15, 20, 25, 30, 45, 60, 90, 120, 180, 240, 300, 600

" }, "NextToken":{ "shape":"NextToken", @@ -1294,7 +1294,7 @@ "documentation":"

The unit for the metric.

" } }, - "documentation":"

Contains information about a historical metric.

" + "documentation":"

Contains information about a historical metric. For a description of each metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

" }, "HistoricalMetricData":{ "type":"structure", @@ -2289,7 +2289,7 @@ }, "ContactFlowId":{ "shape":"ContactFlowId", - "documentation":"

The identifier of the contact flow for the chat.

" + "documentation":"

The identifier of the contact flow for the chat. To see the ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to Routing, Contact Flows. Choose the contact flow. On the contact flow page, under the name of the contact flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold:

arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx

" }, "Attributes":{ "shape":"Attributes", @@ -2341,7 +2341,7 @@ }, "ContactFlowId":{ "shape":"ContactFlowId", - "documentation":"

The identifier of the contact flow for the outbound call.

" + "documentation":"

The identifier of the contact flow for the outbound call. To see the ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to Routing, Contact Flows. Choose the contact flow. On the contact flow page, under the name of the contact flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold:

arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx

" }, "InstanceId":{ "shape":"InstanceId", @@ -2779,5 +2779,5 @@ "Value":{"type":"double"}, "timestamp":{"type":"timestamp"} }, - "documentation":"

Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides rich metrics and real-time reporting that allow you to optimize contact routing. You can also resolve customer issues more efficiently by putting customers in touch with the right agents.

There are limits to the number of Amazon Connect resources that you can create and limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Limits in the Amazon Connect Administrator Guide.

" + "documentation":"

Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage a customer contact center and provide reliable customer engagement at any scale.

Amazon Connect provides rich metrics and real-time reporting that allow you to optimize contact routing. You can also resolve customer issues more efficiently by putting customers in touch with the right agents.

There are limits to the number of Amazon Connect resources that you can create and limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

To connect programmatically to an AWS service, you use an endpoint. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints.

" } diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 910371b621b9..b6509513a42f 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 7f80f2e32a6a..4c4217a62fce 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index 84e8108f5b0b..f77f625f4d24 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json b/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json index e976e315b131..431b8e5dc523 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json +++ b/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json @@ -9,6 +9,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListCostCategoryDefinitions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 1372016b63db..e8e35ffa22b0 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json 
@@ -26,7 +26,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Creates a new Cost Category with the requested name and rules.

" + "documentation":"

Creates a new Cost Category with the requested name and rules.

" }, "DeleteCostCategoryDefinition":{ "name":"DeleteCostCategoryDefinition", @@ -40,7 +40,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

" + "documentation":"

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

" }, "DescribeCostCategoryDefinition":{ "name":"DescribeCostCategoryDefinition", @@ -54,7 +54,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN, rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that is active on a specific date. If there is no EffectiveOn specified, you’ll see a Cost Category that is effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" + "documentation":"

Returns the name, ARN, rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that is active on a specific date. If there is no EffectiveOn specified, you’ll see a Cost Category that is effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" }, "GetCostAndUsage":{ "name":"GetCostAndUsage", @@ -88,7 +88,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"RequestChangedException"} ], - "documentation":"

Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Master accounts in an organization in AWS Organizations have access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.

This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the AWS Billing and Cost Management User Guide.

" + "documentation":"

Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Master accounts in an organization in AWS Organizations have access to all member accounts. This API is currently available for the Amazon Elastic Compute Cloud – Compute service only.

This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information on how to access the Settings page, see Controlling Access for Cost Explorer in the AWS Billing and Cost Management User Guide.

" }, "GetCostForecast":{ "name":"GetCostForecast", @@ -134,7 +134,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the reservation coverage for your account. This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's master account can see the coverage of the associated member accounts. For any time period, you can filter data about reservation usage by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the reservation coverage for your account. This enables you to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's master account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data about reservation usage by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetReservationPurchaseRecommendation":{ "name":"GetReservationPurchaseRecommendation", @@ -178,7 +178,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Creates recommendations that helps you save cost by identifying idle and underutilized Amazon EC2 instances.

Recommendations are generated to either downsize or terminate instances, along with providing savings detail and metrics. For details on calculation and function, see Optimizing Your Cost with Rightsizing Recommendations.

" + "documentation":"

Creates recommendations that help you save cost by identifying idle and underutilized Amazon EC2 instances.

Recommendations are generated to either downsize or terminate instances, along with providing savings detail and metrics. For details on calculation and function, see Optimizing Your Cost with Rightsizing Recommendations in the AWS Billing and Cost Management User Guide.

" }, "GetSavingsPlansCoverage":{ "name":"GetSavingsPlansCoverage", @@ -193,7 +193,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s master account can see the coverage of the associated member accounts. For any time period, you can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s master account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetSavingsPlansPurchaseRecommendation":{ "name":"GetSavingsPlansPurchaseRecommendation", @@ -281,7 +281,7 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Returns the name, ARN and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see Cost Categories that are effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" + "documentation":"

Returns the name, ARN, NumberOfRules and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see Cost Categories that are effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response. ListCostCategoryDefinitions supports pagination. The request can have a MaxResults range up to 100.

" }, "UpdateCostCategoryDefinition":{ "name":"UpdateCostCategoryDefinition", @@ -296,7 +296,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Updates an existing Cost Category. Changes made to the Cost Category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" + "documentation":"

Updates an existing Cost Category. Changes made to the Cost Category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" } }, "shapes":{ @@ -367,7 +367,12 @@ "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" } }, - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory object.

" + "documentation":"

The structure of Cost Categories. This includes detailed metadata and the set of rules for the CostCategory object.

" + }, + "CostCategoryMaxResults":{ + "type":"integer", + "max":100, + "min":1 }, "CostCategoryName":{ "type":"string", @@ -381,7 +386,7 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category Reference.

" + "documentation":"

The unique identifier for your Cost Category.

" }, "Name":{"shape":"CostCategoryName"}, "EffectiveStart":{ @@ -391,9 +396,13 @@ "EffectiveEnd":{ "shape":"ZonedDateTime", "documentation":"

The Cost Category's effective end date.

" + }, + "NumberOfRules":{ + "shape":"NonNegativeInteger", + "documentation":"

The number of rules associated with a specific Cost Category.

" } }, - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" + "documentation":"

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" }, "CostCategoryReferencesList":{ "type":"list", @@ -409,10 +418,10 @@ "Value":{"shape":"CostCategoryValue"}, "Rule":{ "shape":"Expression", - "documentation":"

An Expression object used to categorize costs. This supports dimensions, Tags, and nested expressions. Currently the only dimensions supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

" + "documentation":"

An Expression object used to categorize costs. This supports dimensions, Tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME.

Root level OR is not supported. We recommend that you create a separate rule instead.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the AWS Billing and Cost Management User Guide.

" } }, - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" }, "CostCategoryRuleVersion":{ "type":"string", @@ -441,7 +450,7 @@ "documentation":"

The specific value of the Cost Category.

" } }, - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The values that are available for Cost Categories.

" + "documentation":"

The Cost Categories values used for filtering the costs.

" }, "Coverage":{ "type":"structure", @@ -484,10 +493,10 @@ "members":{ "OnDemandCost":{ "shape":"OnDemandCost", - "documentation":"

How much an On-Demand instance cost.

" + "documentation":"

How much an On-Demand Instance costs.

" } }, - "documentation":"

How much it cost to run an instance.

" + "documentation":"

How much it costs to run an instance.

" }, "CoverageHours":{ "type":"structure", @@ -551,7 +560,7 @@ "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

CreateCostCategoryDefinition supports dimensions, Tags, and nested expressions. Currently the only dimensions supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

The Cost Category rules used to categorize costs. For more information, see CostCategoryRule.

" } } }, @@ -575,6 +584,10 @@ "shape":"GenericString", "documentation":"

Resource ID of the current instance.

" }, + "InstanceName":{ + "shape":"GenericString", + "documentation":"

The name you've given an instance. This field will show as blank if you haven't given the instance a name.

" + }, "Tags":{ "shape":"TagValuesList", "documentation":"

Cost allocation resource tags applied to the instance.

" @@ -689,10 +702,12 @@ "AZ", "INSTANCE_TYPE", "LINKED_ACCOUNT", + "LINKED_ACCOUNT_NAME", "OPERATION", "PURCHASE_TYPE", "REGION", "SERVICE", + "SERVICE_CODE", "USAGE_TYPE", "USAGE_TYPE_GROUP", "RECORD_TYPE", @@ -724,7 +739,11 @@ }, "Values":{ "shape":"Values", - "documentation":"

The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.

Valid values for the SERVICE dimension are Amazon Elastic Compute Cloud - Compute, Amazon Elasticsearch Service, Amazon ElastiCache, Amazon Redshift, and Amazon Relational Database Service.

" + "documentation":"

The metadata values that you can use to filter and group your results. You can use GetDimensionValues to find specific values.

" + }, + "MatchOptions":{ + "shape":"MatchOptions", + "documentation":"

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default value for MatchOptions is EQUALS and CASE_SENSITIVE.

" } }, "documentation":"

The metadata that you can use to filter and group your results. You can use GetDimensionValues to find specific values.

" @@ -939,7 +958,7 @@ }, "CostCategories":{ "shape":"CostCategoryValues", - "documentation":"

Cost Category is in public beta for AWS Billing and Cost Management and is subject to change. Your use of Cost Categories is subject to the Beta Service Participation terms of the AWS Service Terms (Section 1.10).

The specific CostCategory used for Expression.

" + "documentation":"

The filter based on CostCategory values.

" } }, "documentation":"

Use Expression to filter by cost or by usage. There are two patterns:

  • Simple dimension values - You can set the dimension name and values for the filters that you plan to use. For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. The Expression for that looks like this:

    { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", “us-west-1” ] } }

    The list of dimension values are OR'd together to retrieve cost or usage data. You can create Expression and DimensionValues objects using either with* methods or set* methods in multiple lines.

  • Compound dimension values with logical operations - You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. This allows you to filter on more advanced options. For example, you can filter on ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer). The Expression for that looks like this:

    { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error.

    { \"And\": [ ... ], \"DimensionValues\": { \"Dimension\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }

For GetRightsizingRecommendation action, a combination of OR and NOT is not supported. OR is not supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.

" @@ -975,7 +994,12 @@ "member":{"shape":"ForecastResult"} }, "GenericBoolean":{"type":"boolean"}, - "GenericString":{"type":"string"}, + "GenericString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "GetCostAndUsageRequest":{ "type":"structure", "required":["TimePeriod"], @@ -1037,11 +1061,11 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression.

The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId.

" + "documentation":"

Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression.

The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId.

" }, "Metrics":{ "shape":"MetricNames", - "documentation":"

Which metrics are returned in the query. For more information about blended and unblended rates, see Why does the \"blended\" annotation appear on some line items in my bill?.

Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.

If you return the UsageQuantity metric, the service aggregates all usage numbers without taking the units into account. For example, if you aggregate usageQuantity across all of Amazon EC2, the results aren't meaningful because Amazon EC2 compute hours and data transfer are measured in different units (for example, hours vs. GB). To get more meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.

Metrics is required for GetCostAndUsageWithResources requests.

" + "documentation":"

Which metrics are returned in the query. For more information about blended and unblended rates, see Why does the \"blended\" annotation appear on some line items in my bill?.

Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.

If you return the UsageQuantity metric, the service aggregates all usage numbers without taking the units into account. For example, if you aggregate usageQuantity across all of Amazon EC2, the results aren't meaningful because Amazon EC2 compute hours and data transfer are measured in different units (for example, hours vs. GB). To get more meaningful UsageQuantity metrics, filter by UsageType or UsageTypeGroups.

Metrics is required for GetCostAndUsageWithResources requests.

" }, "GroupBy":{ "shape":"GroupDefinitions", @@ -1186,7 +1210,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.

If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.

" + "documentation":"

Filters utilization data by dimensions. You can filter by the following dimensions:

  • AZ

  • CACHE_ENGINE

  • DATABASE_ENGINE

  • DEPLOYMENT_OPTION

  • INSTANCE_TYPE

  • LINKED_ACCOUNT

  • OPERATING_SYSTEM

  • PLATFORM

  • REGION

  • SERVICE

  • TAG

  • TENANCY

GetReservationCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. You can nest only one level deep. If there are multiple values for a dimension, they are OR'd together.

If you don't provide a SERVICE filter, Cost Explorer defaults to EC2.

Cost category is also supported.

" }, "Metrics":{ "shape":"MetricNames", @@ -1325,6 +1349,10 @@ "required":["Service"], "members":{ "Filter":{"shape":"Expression"}, + "Configuration":{ + "shape":"RightsizingRecommendationConfiguration", + "documentation":"

Enables you to customize recommendations across two attributes. You can choose to view recommendations for instances within the same instance families or across different instance families. You can also choose to view your estimated savings associated with recommendations with consideration of existing Savings Plans or RI benefits, or neither.

" + }, "Service":{ "shape":"GenericString", "documentation":"

The specific service that you want recommendations for. The only valid value for GetRightsizingRecommendation is \"AmazonEC2\".

" @@ -1357,6 +1385,10 @@ "NextPageToken":{ "shape":"NextPageToken", "documentation":"

The token to retrieve the next set of results.

" + }, + "Configuration":{ + "shape":"RightsizingRecommendationConfiguration", + "documentation":"

Enables you to customize recommendations across two attributes. You can choose to view recommendations for instances within the same instance families or across different instance families. You can also choose to view your estimated savings associated with recommendations with consideration of existing Savings Plans or RI benefits, or neither.

" } } }, @@ -1378,7 +1410,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters Savings Plans coverage data by dimensions. You can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

GetSavingsPlansCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. If there are multiple values for a dimension, they are OR'd together.

" + "documentation":"

Filters Savings Plans coverage data by dimensions. You can filter data for Savings Plans usage with the following dimensions:

  • LINKED_ACCOUNT

  • REGION

  • SERVICE

  • INSTANCE_FAMILY

GetSavingsPlansCoverage uses the same Expression object as the other operations, but only AND is supported among each dimension. If there are multiple values for a dimension, they are OR'd together.

Cost category is also supported.

" }, "Metrics":{ "shape":"MetricNames", @@ -1424,7 +1456,7 @@ }, "TermInYears":{ "shape":"TermInYears", - "documentation":"

The savings plan recommendation term used to generated these recommendations.

" + "documentation":"

The savings plan recommendation term used to generate these recommendations.

" }, "PaymentOption":{ "shape":"PaymentOption", @@ -1673,7 +1705,12 @@ }, "documentation":"

Represents a group when you specify a group by criteria or in the response to a query with a specific grouping.

" }, - "GroupDefinitionKey":{"type":"string"}, + "GroupDefinitionKey":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "GroupDefinitionType":{ "type":"string", "enum":[ @@ -1746,7 +1783,12 @@ }, "NextToken":{ "shape":"NextPageToken", - "documentation":"

The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" + "documentation":"

The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

" + }, + "MaxResults":{ + "shape":"CostCategoryMaxResults", + "documentation":"

The number of entries a paginated response contains.

", + "box":true } } }, @@ -1771,6 +1813,21 @@ "SIXTY_DAYS" ] }, + "MatchOption":{ + "type":"string", + "enum":[ + "EQUALS", + "STARTS_WITH", + "ENDS_WITH", + "CONTAINS", + "CASE_SENSITIVE", + "CASE_INSENSITIVE" + ] + }, + "MatchOptions":{ + "type":"list", + "member":{"shape":"MatchOption"} + }, "MaxResults":{ "type":"integer", "min":1 @@ -1788,7 +1845,12 @@ ] }, "MetricAmount":{"type":"string"}, - "MetricName":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "MetricNames":{ "type":"list", "member":{"shape":"MetricName"} @@ -1824,7 +1886,12 @@ "documentation":"

Details on the modification recommendation.

" }, "NetRISavings":{"type":"string"}, - "NextPageToken":{"type":"string"}, + "NextPageToken":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"[\\S\\s]*" + }, "NonNegativeInteger":{ "type":"integer", "min":0 @@ -1901,6 +1968,13 @@ }, "documentation":"

Details about the Amazon RDS instances that AWS recommends that you purchase.

" }, + "RecommendationTarget":{ + "type":"string", + "enum":[ + "SAME_INSTANCE_FAMILY", + "CROSS_INSTANCE_FAMILY" + ] + }, "RedshiftInstanceDetails":{ "type":"structure", "members":{ @@ -2146,10 +2220,10 @@ }, "GenerationTimestamp":{ "shape":"GenericString", - "documentation":"

The time stamp for when AWS made this recommendation.

" + "documentation":"

The timestamp for when AWS made this recommendation.

" } }, - "documentation":"

Information about this specific recommendation, such as the time stamp for when AWS made a specific recommendation.

" + "documentation":"

Information about this specific recommendation, such as the timestamp for when AWS made a specific recommendation.

" }, "ReservationPurchaseRecommendationSummary":{ "type":"structure", @@ -2281,6 +2355,24 @@ }, "documentation":"

Recommendations to rightsize resources.

" }, + "RightsizingRecommendationConfiguration":{ + "type":"structure", + "required":[ + "RecommendationTarget", + "BenefitsConsidered" + ], + "members":{ + "RecommendationTarget":{ + "shape":"RecommendationTarget", + "documentation":"

The option to see recommendations within the same instance family, or recommendations for instances across other families. The default value is SAME_INSTANCE_FAMILY.

" + }, + "BenefitsConsidered":{ + "shape":"GenericBoolean", + "documentation":"

The option to consider RI or Savings Plans discount benefits in your savings calculation. The default value is TRUE.

" + } + }, + "documentation":"

Enables you to customize recommendations across two attributes. You can choose to view recommendations for instances within the same instance families or across different instance families. You can also choose to view your estimated savings associated with recommendations with consideration of existing Savings Plans or RI benefits, or neither.

" + }, "RightsizingRecommendationList":{ "type":"list", "member":{"shape":"RightsizingRecommendation"} @@ -2294,7 +2386,7 @@ }, "GenerationTimestamp":{ "shape":"GenericString", - "documentation":"

The time stamp for when Amazon Web Services made this recommendation.

" + "documentation":"

The timestamp for when Amazon Web Services made this recommendation.

" }, "LookbackPeriodInDays":{ "shape":"LookbackPeriodInDays", @@ -2320,7 +2412,7 @@ }, "SavingsPercentage":{ "shape":"GenericString", - "documentation":"

Savings percentage based on the recommended modifications, relative to the total On Demand costs associated with these instances.

" + "documentation":"

Savings percentage based on the recommended modifications, relative to the total On-Demand costs associated with these instances.

" } }, "documentation":"

Summary of rightsizing recommendations

" @@ -2383,7 +2475,7 @@ }, "CoveragePercentage":{ "shape":"GenericString", - "documentation":"

The percentage of your existing Savings Planscovered usage, divided by all of your eligible Savings Plans usage in an account(or set of accounts).

" + "documentation":"

The percentage of your existing Savings Plans covered usage, divided by all of your eligible Savings Plans usage in an account (or set of accounts).

" } }, "documentation":"

Specific coverage percentage, On-Demand costs, and spend covered by Savings Plans, and total Savings Plans costs for an account.

" @@ -2435,7 +2527,7 @@ }, "SavingsPlansPurchaseRecommendationDetails":{ "shape":"SavingsPlansPurchaseRecommendationDetailList", - "documentation":"

Details for the Savings Plans we recommend you to purchase to cover existing, Savings Plans eligible workloads.

" + "documentation":"

Details for the Savings Plans we recommend that you purchase to cover existing Savings Plans eligible workloads.

" }, "SavingsPlansPurchaseRecommendationSummary":{ "shape":"SavingsPlansPurchaseRecommendationSummary", @@ -2694,7 +2786,12 @@ "type":"list", "member":{"shape":"SavingsPlansUtilizationByTime"} }, - "SearchString":{"type":"string"}, + "SearchString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -2720,7 +2817,12 @@ "EC2_INSTANCE_SP" ] }, - "TagKey":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "TagList":{ "type":"list", "member":{"shape":"Entity"} @@ -2735,6 +2837,10 @@ "Values":{ "shape":"Values", "documentation":"

The specific value of the tag.

" + }, + "MatchOptions":{ + "shape":"MatchOptions", + "documentation":"

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default value for MatchOptions is EQUALS and CASE_SENSITIVE.

" } }, "documentation":"

The values that are available for a tag.

" @@ -2829,7 +2935,7 @@ "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

UpdateCostCategoryDefinition supports dimensions, Tags, and nested expressions. Currently the only dimensions supported is LINKED_ACCOUNT.

Root level OR is not supported. We recommend you create a separate rule instead.

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

The Expression object used to categorize costs. For more information, see CostCategoryRule.

" } } }, @@ -2870,13 +2976,20 @@ "type":"list", "member":{"shape":"UtilizationByTime"} }, - "Value":{"type":"string"}, + "Value":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\S\\s]*" + }, "Values":{ "type":"list", "member":{"shape":"Value"} }, "YearMonthDay":{ "type":"string", + "max":40, + "min":0, "pattern":"(\\d{4}-\\d{2}-\\d{2})(T\\d{2}:\\d{2}:\\d{2}Z)?" }, "ZonedDateTime":{ diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 8841e1dbd067..b002c1cc7518 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index b49f89f66b5a..96b25bdfdc65 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -808,7 +808,7 @@ "documentation":"

The name of the Availability Zone.

" } }, - "documentation":"

The name of the Availability Zone for use during database migration.

" + "documentation":"

The name of an Availability Zone for use during database migration.

" }, "AvailabilityZonesList":{ "type":"list", @@ -887,7 +887,7 @@ }, "Status":{ "shape":"String", - "documentation":"

The connection status.

" + "documentation":"

The connection status. This parameter can return one of the following values:

  • \"successful\"

  • \"testing\"

  • \"failed\"

  • \"deleting\"

" }, "LastFailureMessage":{ "shape":"String", @@ -926,7 +926,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndpointType value, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", and \"sqlserver\".

" + "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndpointType value, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

" }, "Username":{ "shape":"String", @@ -990,19 +990,23 @@ }, "MongoDbSettings":{ "shape":"MongoDbSettings", - "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" + "documentation":"

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" }, "KinesisSettings":{ "shape":"KinesisSettings", - "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For information about other available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" }, "KafkaSettings":{ "shape":"KafkaSettings", - "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to Apache Kafka in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" }, "ElasticsearchSettings":{ "shape":"ElasticsearchSettings", - "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + }, + "NeptuneSettings":{ + "shape":"NeptuneSettings", + "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" }, "RedshiftSettings":{"shape":"RedshiftSettings"} }, @@ -1075,7 +1079,7 @@ "members":{ "ReplicationInstanceIdentifier":{ "shape":"String", - "documentation":"

The replication instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

Example: myrepinstance

" + "documentation":"

The replication instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain 1-63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

Example: myrepinstance

" }, "AllocatedStorage":{ "shape":"IntegerOptional", @@ -1083,7 +1087,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance as specified by the replication instance class.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -1127,7 +1131,7 @@ }, "DnsNameServers":{ "shape":"String", - "documentation":"

A list of DNS name servers supported for the replication instance.

" + "documentation":"

A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a comma-separated list of internet addresses for up to four on-premise DNS name servers. For example: \"1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4\"

" } }, "documentation":"

" @@ -1192,7 +1196,7 @@ "members":{ "ReplicationTaskIdentifier":{ "shape":"String", - "documentation":"

An identifier for the replication task.

Constraints:

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + "documentation":"

An identifier for the replication task.

Constraints:

  • Must contain 1-255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" }, "SourceEndpointArn":{ "shape":"String", @@ -1212,11 +1216,11 @@ }, "TableMappings":{ "shape":"String", - "documentation":"

The table mappings for the task, in JSON format. For more information, see Table Mapping in the AWS Database Migration User Guide.

" + "documentation":"

The table mappings for the task, in JSON format. For more information, see Using Table Mapping to Specify Task Settings in the AWS Database Migration Service User Guide.

" }, "ReplicationTaskSettings":{ "shape":"String", - "documentation":"

Overall settings for the task, in JSON format. For more information, see Task Settings in the AWS Database Migration User Guide.

" + "documentation":"

Overall settings for the task, in JSON format. For more information, see Specifying Task Settings for AWS Database Migration Service Tasks in the AWS Database Migration Service User Guide.

" }, "CdcStartTime":{ "shape":"TStamp", @@ -1233,6 +1237,10 @@ "Tags":{ "shape":"TagList", "documentation":"

One or more tags to be assigned to the replication task.

" + }, + "TaskData":{ + "shape":"String", + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" } }, "documentation":"

" @@ -1821,7 +1829,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

Filters applied to the describe action.

" + "documentation":"

Filters applied to the describe action.

Valid filter names: replication-subnet-group-id

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -2082,7 +2090,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", and \"sqlserver\".

" + "documentation":"

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

" }, "EngineDisplayName":{ "shape":"String", @@ -2168,6 +2176,10 @@ "shape":"ElasticsearchSettings", "documentation":"

The settings for the Elasticsearch source endpoint. For more information, see the ElasticsearchSettings structure.

" }, + "NeptuneSettings":{ + "shape":"NeptuneSettings", + "documentation":"

The settings for the Amazon Neptune target endpoint. For more information, see the NeptuneSettings structure.

" + }, "RedshiftSettings":{ "shape":"RedshiftSettings", "documentation":"

Settings for the Amazon Redshift endpoint.

" @@ -2252,7 +2264,7 @@ }, "SubscriptionCreationTime":{ "shape":"String", - "documentation":"

The time the RDS event notification subscription was created.

" + "documentation":"

The time the AWS DMS event notification subscription was created.

" }, "SourceType":{ "shape":"String", @@ -2540,7 +2552,7 @@ }, "EngineName":{ "shape":"String", - "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", and \"sqlserver\".

" + "documentation":"

The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

" }, "Username":{ "shape":"String", @@ -2600,15 +2612,19 @@ }, "KinesisSettings":{ "shape":"KinesisSettings", - "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For information about other available settings, see Using Object Mapping to Migrate Data to a Kinesis Data Stream in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. For more information about the available settings, see Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" }, "KafkaSettings":{ "shape":"KafkaSettings", - "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to Apache Kafka in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target Apache Kafka endpoint. For more information about the available settings, see Using Apache Kafka as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" }, "ElasticsearchSettings":{ "shape":"ElasticsearchSettings", - "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration User Guide.

" + "documentation":"

Settings in JSON format for the target Elasticsearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS in the AWS Database Migration Service User Guide.

" + }, + "NeptuneSettings":{ + "shape":"NeptuneSettings", + "documentation":"

Settings in JSON format for the target Amazon Neptune endpoint. For more information about the available settings, see Specifying Endpoint Settings for Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" }, "RedshiftSettings":{"shape":"RedshiftSettings"} }, @@ -2679,7 +2695,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", @@ -2764,7 +2780,7 @@ }, "ReplicationTaskIdentifier":{ "shape":"String", - "documentation":"

The replication task identifier.

Constraints:

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + "documentation":"

The replication task identifier.

Constraints:

  • Must contain 1-255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" }, "MigrationType":{ "shape":"MigrationTypeValue", @@ -2776,7 +2792,7 @@ }, "ReplicationTaskSettings":{ "shape":"String", - "documentation":"

JSON file that contains settings for the task, such as target metadata settings.

" + "documentation":"

JSON file that contains settings for the task, such as task metadata settings.

" }, "CdcStartTime":{ "shape":"TStamp", @@ -2789,6 +2805,10 @@ "CdcStopPosition":{ "shape":"String", "documentation":"

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “

" + }, + "TaskData":{ + "shape":"String", + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" } }, "documentation":"

" @@ -2828,27 +2848,27 @@ }, "AuthType":{ "shape":"AuthTypeValue", - "documentation":"

The authentication type you use to access the MongoDB source endpoint.

Valid values: NO, PASSWORD

When NO is selected, user name and password parameters are not used and can be empty.

" + "documentation":"

The authentication type you use to access the MongoDB source endpoint.

When set to \"no\", user name and password parameters are not used and can be empty.

" }, "AuthMechanism":{ "shape":"AuthMechanismValue", - "documentation":"

The authentication mechanism you use to access the MongoDB source endpoint.

Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1

DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version 3.x, use SCRAM_SHA_1. This setting isn't used when authType=No.

" + "documentation":"

The authentication mechanism you use to access the MongoDB source endpoint.

For the default value, in MongoDB version 2.x, \"default\" is \"mongodb_cr\". For MongoDB version 3.x or later, \"default\" is \"scram_sha_1\". This setting isn't used when AuthType is set to \"no\".

" }, "NestingLevel":{ "shape":"NestingLevelValue", - "documentation":"

Specifies either document or table mode.

Valid values: NONE, ONE

Default value is NONE. Specify NONE to use document mode. Specify ONE to use table mode.

" + "documentation":"

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

" }, "ExtractDocId":{ "shape":"String", - "documentation":"

Specifies the document ID. Use this setting when NestingLevel is set to NONE.

Default value is false.

" + "documentation":"

Specifies the document ID. Use this setting when NestingLevel is set to \"none\".

Default value is \"false\".

" }, "DocsToInvestigate":{ "shape":"String", - "documentation":"

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to ONE.

Must be a positive value greater than 0. Default value is 1000.

" + "documentation":"

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

" }, "AuthSource":{ "shape":"String", - "documentation":"

The MongoDB database name. This setting isn't used when authType=NO.

The default is admin.

" + "documentation":"

The MongoDB database name. This setting isn't used when AuthType is set to \"no\".

The default is \"admin\".

" }, "KmsKeyId":{ "shape":"String", @@ -2857,6 +2877,44 @@ }, "documentation":"

Provides information that defines a MongoDB endpoint.

" }, + "NeptuneSettings":{ + "type":"structure", + "required":[ + "S3BucketName", + "S3BucketFolder" + ], + "members":{ + "ServiceAccessRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

" + }, + "S3BucketName":{ + "shape":"String", + "documentation":"

The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these .csv files.

" + }, + "S3BucketFolder":{ + "shape":"String", + "documentation":"

A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName.

" + }, + "ErrorRetryDuration":{ + "shape":"IntegerOptional", + "documentation":"

The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.

" + }, + "MaxFileSize":{ + "shape":"IntegerOptional", + "documentation":"

The maximum size in kilobytes of migrated graph data stored in a .csv file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.

" + }, + "MaxRetryCount":{ + "shape":"IntegerOptional", + "documentation":"

The number of times for AWS DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.

" + }, + "IamAuthEnabled":{ + "shape":"BooleanOptional", + "documentation":"

If you want AWS Identity and Access Management (IAM) authorization enabled for this endpoint, set this parameter to true. Then attach the appropriate IAM policy document to your service role specified by ServiceAccessRoleArn. The default is false.

" + } + }, + "documentation":"

Provides information that defines an Amazon Neptune endpoint.

" + }, "NestingLevelValue":{ "type":"string", "enum":[ @@ -2873,7 +2931,7 @@ }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" }, "StorageType":{ "shape":"String", @@ -3223,15 +3281,15 @@ "members":{ "ReplicationInstanceIdentifier":{ "shape":"String", - "documentation":"

The replication instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: myrepinstance

" + "documentation":"

The replication instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain 1-63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: myrepinstance

" }, "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" }, "ReplicationInstanceStatus":{ "shape":"String", - "documentation":"

The status of the replication instance.

" + "documentation":"

The status of the replication instance. The possible return values include:

  • \"available\"

  • \"creating\"

  • \"deleted\"

  • \"deleting\"

  • \"failed\"

  • \"modifying\"

  • \"upgrading\"

  • \"rebooting\"

  • \"resetting-master-credentials\"

  • \"storage-full\"

  • \"incompatible-credentials\"

  • \"incompatible-network\"

  • \"maintenance\"

" }, "AllocatedStorage":{ "shape":"Integer", @@ -3313,7 +3371,7 @@ }, "DnsNameServers":{ "shape":"String", - "documentation":"

The DNS name servers for the replication instance.

" + "documentation":"

The DNS name servers supported for the replication instance to access your on-premise source or target database.

" } }, "documentation":"

Provides information that defines a replication instance.

" @@ -3357,7 +3415,7 @@ "members":{ "ReplicationInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the replication instance.

Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge

" + "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

" }, "AllocatedStorage":{ "shape":"IntegerOptional", @@ -3420,7 +3478,7 @@ "members":{ "ReplicationTaskIdentifier":{ "shape":"String", - "documentation":"

The user-assigned replication task identifier or name.

Constraints:

  • Must contain from 1 to 255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" + "documentation":"

The user-assigned replication task identifier or name.

Constraints:

  • Must contain 1-255 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

" }, "SourceEndpointArn":{ "shape":"String", @@ -3456,7 +3514,7 @@ }, "StopReason":{ "shape":"String", - "documentation":"

The reason the replication task was stopped.

" + "documentation":"

The reason the replication task was stopped. This response parameter can return one of the following values:

  • \"STOP_REASON_FULL_LOAD_COMPLETED\" – Full-load migration completed.

  • \"STOP_REASON_CACHED_CHANGES_APPLIED\" – Change data capture (CDC) load completed.

  • \"STOP_REASON_CACHED_CHANGES_NOT_APPLIED\" – In a full-load and CDC migration, the full-load stopped as specified before starting the CDC migration.

  • \"STOP_REASON_SERVER_TIME\" – The migration stopped at the specified server time.

" }, "ReplicationTaskCreationDate":{ "shape":"TStamp", @@ -3485,6 +3543,10 @@ "ReplicationTaskStats":{ "shape":"ReplicationTaskStats", "documentation":"

The statistics for the task, including elapsed time, tables loaded, and table errors.

" + }, + "TaskData":{ + "shape":"String", + "documentation":"

Supplemental information that the task requires to migrate the data for certain source and target endpoints. For more information, see Specifying Supplemental Data for Task Settings in the AWS Database Migration Service User Guide.

" } }, "documentation":"

Provides information that describes a replication task created by the CreateReplicationTask operation.

" @@ -3902,7 +3964,7 @@ "members":{ "EngineName":{ "shape":"String", - "documentation":"

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", and \"sqlserver\".

" + "documentation":"

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

" }, "SupportsCDC":{ "shape":"Boolean", @@ -3912,6 +3974,10 @@ "shape":"ReplicationEndpointTypeValue", "documentation":"

The type of endpoint. Valid values are source and target.

" }, + "ReplicationInstanceEngineMinimumVersion":{ + "shape":"String", + "documentation":"

The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.

" + }, "EngineDisplayName":{ "shape":"String", "documentation":"

The expanded name for the engine name. For example, if the EngineName parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"

" @@ -4033,11 +4099,11 @@ "members":{ "Key":{ "shape":"String", - "documentation":"

A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + "documentation":"

A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" }, "Value":{ "shape":"String", - "documentation":"

A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" + "documentation":"

A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with \"aws:\" or \"dms:\". The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regular expressions: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

" } }, "documentation":"

A user-defined key-value pair that describes metadata added to an AWS DMS resource and that is used by operations such as the following:

  • AddTagsToResource

  • ListTagsForResource

  • RemoveTagsFromResource

" @@ -4094,14 +4160,14 @@ "members":{ "VpcSecurityGroupId":{ "shape":"String", - "documentation":"

The VPC security group Id.

" + "documentation":"

The VPC security group ID.

" }, "Status":{ "shape":"String", "documentation":"

The status of the VPC security group.

" } }, - "documentation":"

Describes status of a security group associated with the virtual private cloud hosting your replication and DB instances.

" + "documentation":"

Describes the status of a security group associated with the virtual private cloud (VPC) hosting your replication and DB instances.

" }, "VpcSecurityGroupMembershipList":{ "type":"list", diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 860ba9a16593..9cb4da8be975 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/dataexchange/src/main/resources/codegen-resources/service-2.json b/services/dataexchange/src/main/resources/codegen-resources/service-2.json index 8057f64d11f5..7074c6b0b29f 100644 --- a/services/dataexchange/src/main/resources/codegen-resources/service-2.json +++ b/services/dataexchange/src/main/resources/codegen-resources/service-2.json @@ -1336,6 +1336,10 @@ "shape": "Id", "documentation": "

The unique identifier for the data set associated with this export job.

" }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

Encryption configuration for the export job.

" + }, "RevisionId": { "shape": "Id", "documentation": "

The unique identifier for the revision associated with this export request.

" @@ -1359,6 +1363,10 @@ "shape": "Id", "documentation": "

The unique identifier for the data set associated with this export job.

" }, + "Encryption": { + "shape": "ExportServerSideEncryption", + "documentation": "

Encryption configuration of the export job.

" + }, "RevisionId": { "shape": "Id", "documentation": "

The unique identifier for the revision associated with this export response.

" @@ -1371,6 +1379,23 @@ "RevisionId" ] }, + "ExportServerSideEncryption": { + "type": "structure", + "members": { + "KmsKeyArn": { + "shape": "__string", + "documentation": "

The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.

" + }, + "Type": { + "shape": "ServerSideEncryptionTypes", + "documentation": "

The type of server side encryption used for encrypting the objects in Amazon S3.

" + } + }, + "documentation": "

Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

", + "required": [ + "Type" + ] + }, "GetAssetRequest": { "type": "structure", "members": { @@ -1832,7 +1857,7 @@ }, "ResourceId": { "shape": "__string", - "documentation": "The unqiue identifier for the resource related to the error." + "documentation": "The unique identifier for the resource related to the error." }, "ResourceType": { "shape": "JobErrorResourceTypes", @@ -2247,6 +2272,14 @@ "Size" ] }, + "ServerSideEncryptionTypes": { + "type": "string", + "documentation": "

The types of encryption supported in export jobs to Amazon S3.

", + "enum": [ + "aws:kms", + "AES256" + ] + }, "ServiceLimitExceededException": { "type": "structure", "members": { @@ -2671,25 +2704,7 @@ "type": "string", "min": 24, "max": 24, - "pattern": "/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/" - } - }, - "authorizers": { - "create_job_authorizer": { - "name": "create_job_authorizer", - "type": "provided", - "placement": { - "location": "header", - "name": "Authorization" - } - }, - "start_cancel_get_job_authorizer": { - "name": "start_cancel_get_job_authorizer", - "type": "provided", - "placement": { - "location": "header", - "name": "Authorization" - } + "pattern": "/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/" } }, "documentation": "

AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data sets in the AWS Cloud.

As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIs to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services.

As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide.

A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets.

" diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 998430b94ba5..f9b9c505e455 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 29efe04f9397..9af62880a0c3 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/dax/pom.xml b/services/dax/pom.xml index b47925a6915a..0828c677aaa0 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 8185ecc3741b..9554d396f920 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 6bc5025f074e..6808424944d6 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index d902ccb0f282..ae68451c3528 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/service-2.json 
b/services/directconnect/src/main/resources/codegen-resources/service-2.json index 05dfa5373324..5aef9e5a7f4f 100644 --- a/services/directconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/directconnect/src/main/resources/codegen-resources/service-2.json @@ -321,7 +321,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different AWS Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

" + "documentation":"

Creates a private virtual interface. A virtual interface is the VLAN that transports AWS Direct Connect traffic. A private virtual interface can be connected to either a Direct Connect gateway or a Virtual Private Gateway (VGW). Connecting the private virtual interface to a Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different AWS Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "CreatePublicVirtualInterface":{ "name":"CreatePublicVirtualInterface", @@ -353,7 +353,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.

If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your the transit gateway and Direct Connect gateway, the association request fails.

" + "documentation":"

Creates a transit virtual interface. A transit virtual interface should be used to access one or more transit gateways associated with Direct Connect gateways. A transit virtual interface enables the connection of multiple VPCs attached to a transit gateway to a Direct Connect gateway.

If you associate your transit gateway with one or more Direct Connect gateways, the Autonomous System Number (ASN) used by the transit gateway and the Direct Connect gateway must be different. For example, if you use the default ASN 64512 for both your transit gateway and Direct Connect gateway, the association request fails.

Setting the MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" }, "DeleteBGPPeer":{ "name":"DeleteBGPPeer", @@ -706,6 +706,48 @@ ], "documentation":"

Disassociates a connection from a link aggregation group (LAG). The connection is interrupted and re-established as a standalone connection (the connection is not deleted; to delete the connection, use the DeleteConnection request). If the LAG has associated virtual interfaces or hosted connections, they remain associated with the LAG. A disassociated connection owned by an AWS Direct Connect Partner is automatically converted to an interconnect.

If disassociating the connection would cause the LAG to fall below its setting for minimum number of operational connections, the request fails, except when it's the last member of the LAG. If all connections are disassociated, the LAG continues to exist as an empty LAG with no physical connections.

" }, + "ListVirtualInterfaceTestHistory":{ + "name":"ListVirtualInterfaceTestHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVirtualInterfaceTestHistoryRequest"}, + "output":{"shape":"ListVirtualInterfaceTestHistoryResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Lists the virtual interface failover test history.

" + }, + "StartBgpFailoverTest":{ + "name":"StartBgpFailoverTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartBgpFailoverTestRequest"}, + "output":{"shape":"StartBgpFailoverTestResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Starts the virtual interface failover test that verifies your configuration meets your resiliency requirements by placing the BGP peering session in the DOWN state. You can then send traffic to verify that there are no outages.

You can run the test on public, private, transit, and hosted virtual interfaces.

You can use ListVirtualInterfaceTestHistory to view the virtual interface test history.

If you need to stop the test before the test interval completes, use StopBgpFailoverTest.

" + }, + "StopBgpFailoverTest":{ + "name":"StopBgpFailoverTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopBgpFailoverTestRequest"}, + "output":{"shape":"StopBgpFailoverTestResponse"}, + "errors":[ + {"shape":"DirectConnectServerException"}, + {"shape":"DirectConnectClientException"} + ], + "documentation":"

Stops the virtual interface failover test.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -776,7 +818,7 @@ {"shape":"DirectConnectServerException"}, {"shape":"DirectConnectClientException"} ], - "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" + "documentation":"

Updates the specified attributes of the specified virtual private interface.

Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces.

" } }, "shapes":{ @@ -1088,6 +1130,10 @@ "documentation":"

Information about a BGP peer.

" }, "BGPPeerId":{"type":"string"}, + "BGPPeerIdList":{ + "type":"list", + "member":{"shape":"BGPPeerId"} + }, "BGPPeerList":{ "type":"list", "member":{"shape":"BGPPeer"} @@ -2286,7 +2332,9 @@ "documentation":"

A tag key was specified more than once.

", "exception":true }, + "EndTime":{"type":"timestamp"}, "ErrorMessage":{"type":"string"}, + "FailureTestHistoryStatus":{"type":"string"}, "GatewayIdToAssociate":{"type":"string"}, "GatewayIdentifier":{"type":"string"}, "GatewayType":{ @@ -2495,6 +2543,48 @@ } } }, + "ListVirtualInterfaceTestHistoryRequest":{ + "type":"structure", + "members":{ + "testId":{ + "shape":"TestId", + "documentation":"

The ID of the virtual interface failover test.

" + }, + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface that was tested.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers that were placed in the DOWN state during the virtual interface failover test.

" + }, + "status":{ + "shape":"FailureTestHistoryStatus", + "documentation":"

The status of the virtual interface failover test.

" + }, + "maxResults":{ + "shape":"MaxResultSetSize", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

If MaxResults is given a value larger than 100, only 100 results are returned.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next page of results.

" + } + } + }, + "ListVirtualInterfaceTestHistoryResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTestHistory":{ + "shape":"VirtualInterfaceTestHistoryList", + "documentation":"

Information about the virtual interface failover test history.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "Loa":{ "type":"structure", "members":{ @@ -2598,7 +2688,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2653,7 +2743,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2700,7 +2790,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2747,7 +2837,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2789,7 +2879,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2835,7 +2925,7 @@ "members":{ "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -2920,7 +3010,54 @@ "member":{"shape":"RouteFilterPrefix"} }, "RouterConfig":{"type":"string"}, + "StartBgpFailoverTestRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface you want to test.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers to place in the DOWN state.

" + }, + "testDurationInMinutes":{ + "shape":"TestDuration", + "documentation":"

The time in minutes that the virtual interface failover test will last.

Maximum value: 180 minutes (3 hours).

Default: 180 minutes (3 hours).

" + } + } + }, + "StartBgpFailoverTestResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTest":{ + "shape":"VirtualInterfaceTestHistory", + "documentation":"

Information about the virtual interface failover test.

" + } + } + }, + "StartTime":{"type":"timestamp"}, "StateChangeError":{"type":"string"}, + "StopBgpFailoverTestRequest":{ + "type":"structure", + "required":["virtualInterfaceId"], + "members":{ + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the virtual interface you no longer want to test.

" + } + } + }, + "StopBgpFailoverTestResponse":{ + "type":"structure", + "members":{ + "virtualInterfaceTest":{ + "shape":"VirtualInterfaceTestHistory", + "documentation":"

Information about the virtual interface failover test.

" + } + } + }, "Tag":{ "type":"structure", "required":["key"], @@ -2979,6 +3116,11 @@ "min":0, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, + "TestDuration":{ + "type":"integer", + "box":true + }, + "TestId":{"type":"string"}, "TooManyTagsException":{ "type":"structure", "members":{ @@ -3122,7 +3264,7 @@ }, "virtualInterfaceName":{ "shape":"VirtualInterfaceName", - "documentation":"

The name of the virtual interface assigned by the customer network.

" + "documentation":"

The name of the virtual interface assigned by the customer network. The name has a maximum of 100 characters. The following are valid characters: a-z, 0-9 and a hyphen (-).

" }, "vlan":{ "shape":"VLAN", @@ -3220,6 +3362,48 @@ "unknown" ] }, + "VirtualInterfaceTestHistory":{ + "type":"structure", + "members":{ + "testId":{ + "shape":"TestId", + "documentation":"

The ID of the virtual interface failover test.

" + }, + "virtualInterfaceId":{ + "shape":"VirtualInterfaceId", + "documentation":"

The ID of the tested virtual interface.

" + }, + "bgpPeers":{ + "shape":"BGPPeerIdList", + "documentation":"

The BGP peers that were put in the DOWN state as part of the virtual interface failover test.

" + }, + "status":{ + "shape":"FailureTestHistoryStatus", + "documentation":"

The status of the virtual interface failover test.

" + }, + "ownerAccount":{ + "shape":"OwnerAccount", + "documentation":"

The owner ID of the tested virtual interface.

" + }, + "testDurationInMinutes":{ + "shape":"TestDuration", + "documentation":"

The time that the virtual interface failover test ran in minutes.

" + }, + "startTime":{ + "shape":"StartTime", + "documentation":"

The time that the virtual interface moves to the DOWN state.

" + }, + "endTime":{ + "shape":"EndTime", + "documentation":"

The time that the virtual interface moves out of the DOWN state.

" + } + }, + "documentation":"

Information about the virtual interface failover test.

" + }, + "VirtualInterfaceTestHistoryList":{ + "type":"list", + "member":{"shape":"VirtualInterfaceTestHistory"} + }, "VirtualInterfaceType":{"type":"string"}, "VirtualInterfaces":{ "type":"structure", diff --git a/services/directory/pom.xml b/services/directory/pom.xml index 99f767791897..c4eec4545fa6 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 59540ef94474..36217e56647b 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/dlm/src/main/resources/codegen-resources/service-2.json b/services/dlm/src/main/resources/codegen-resources/service-2.json index 76ff6e7883cb..22948588a367 100644 --- a/services/dlm/src/main/resources/codegen-resources/service-2.json +++ b/services/dlm/src/main/resources/codegen-resources/service-2.json @@ -204,14 +204,10 @@ }, "CreateRule":{ "type":"structure", - "required":[ - "Interval", - "IntervalUnit" - ], "members":{ "Interval":{ "shape":"Interval", - "documentation":"

The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, and 24.

" + "documentation":"

The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24.

" }, "IntervalUnit":{ "shape":"IntervalUnitValues", @@ -219,10 +215,20 @@ }, "Times":{ "shape":"TimesList", - "documentation":"

The time, in UTC, to start the operation. The supported format is hh:mm.

The operation occurs within a one-hour window following the specified time.

" + "documentation":"

The time, in UTC, to start the operation. The supported format is hh:mm.

The operation occurs within a one-hour window following the specified time. If you do not specify a time, Amazon DLM selects a time within the next 24 hours.

" + }, + "CronExpression":{ + "shape":"CronExpression", + "documentation":"

The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see Cron expressions in the Amazon CloudWatch User Guide.

" } }, - "documentation":"

Specifies when to create snapshots of EBS volumes.

" + "documentation":"

Specifies when to create snapshots of EBS volumes.

You must specify either a Cron expression or an interval, interval unit, and start time. You cannot specify both.

" + }, + "CronExpression":{ + "type":"string", + "max":106, + "min":17, + "pattern":"cron\\([^\\n]{11,100}\\)" }, "CrossRegionCopyRetainRule":{ "type":"structure", @@ -576,7 +582,7 @@ }, "ResourceTypes":{ "shape":"ResourceTypeValuesList", - "documentation":"

The resource type.

" + "documentation":"

The resource type. Use VOLUME to create snapshots of individual volumes or use INSTANCE to create multi-volume snapshots from the volumes for an instance.

" }, "TargetTags":{ "shape":"TargetTagList", @@ -711,7 +717,7 @@ }, "ScheduleName":{ "type":"string", - "max":500, + "max":120, "min":0, "pattern":"[\\p{all}]*" }, diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 80ef6ea73b34..858787d9a0c8 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 58d86f6643d0..4c8cbbc0ff64 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config index 5a48a51b841a..ef3e95c5e70c 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config @@ -32,5 +32,6 @@ "listWebACLs", "listXssMatchSets" ], - "customRetryPolicy" : "software.amazon.awssdk.services.dynamodb.DynamoDbRetryPolicy" + "customRetryPolicy" : "software.amazon.awssdk.services.dynamodb.DynamoDbRetryPolicy", + "enableEndpointDiscoveryMethodRequired": true } diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index 28bd407ac755..f3691ff9eb9a 100755 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -84,7 +84,7 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

This method only applies to Version 2017.11.29 of global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", + "documentation":"

Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions.

This operation only applies to Version 2017.11.29 of global tables.

If you want to add a new replica table to a global table, each of the following conditions must be true:

  • The table must have the same primary key as all of the other replicas.

  • The table must have the same name as all of the other replicas.

  • The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item.

  • None of the replica tables in the global table can contain any data.

If global secondary indexes are specified, then the following conditions must also be met:

  • The global secondary indexes must have the same name.

  • The global secondary indexes must have the same hash key and sort key (if present).

If local secondary indexes are specified, then the following conditions must also be met:

  • The local secondary indexes must have the same name.

  • The local secondary indexes must have the same hash key and sort key (if present).

Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.

If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.

", "endpointdiscovery":{ } }, @@ -231,7 +231,7 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"

Returns information about the specified global table.

This method only applies to Version 2017.11.29 of global tables.

", + "documentation":"

Returns information about the specified global table.

This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use DescribeTable instead.

", "endpointdiscovery":{ } }, @@ -247,7 +247,7 @@ {"shape":"GlobalTableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes Region-specific settings for a global table.

This method only applies to Version 2017.11.29 of global tables.

", + "documentation":"

Describes Region-specific settings for a global table.

This operation only applies to Version 2017.11.29 of global tables.

", "endpointdiscovery":{ } }, @@ -294,7 +294,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

Describes auto scaling settings across replicas of the global table at once.

This method only applies to Version 2019.11.21 of global tables.

" + "documentation":"

Describes auto scaling settings across replicas of the global table at once.

This operation only applies to Version 2019.11.21 of global tables.

" }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -370,7 +370,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all global tables that have a replica in the specified Region.

This method only applies to Version 2017.11.29 of global tables.

", + "documentation":"

Lists all global tables that have a replica in the specified Region.

This operation only applies to Version 2017.11.29 of global tables.

", "endpointdiscovery":{ } }, @@ -422,7 +422,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

", + "documentation":"

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.

This topic provides general information about the PutItem API.

For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:

When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null.

Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes cannot be empty.

Invalid Requests with empty values will be rejected with a ValidationException exception.

To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.

For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.

", "endpointdiscovery":{ } }, @@ -700,7 +700,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates auto scaling settings on your global tables at once.

This method only applies to Version 2019.11.21 of global tables.

" + "documentation":"

Updates auto scaling settings on your global tables at once.

This operation only applies to Version 2019.11.21 of global tables.

" }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -3193,7 +3193,7 @@ }, "Item":{ "shape":"PutItemInputAttributeMap", - "documentation":"

A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.

You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.

If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

Each element in the Item map is an AttributeValue object.

" + "documentation":"

A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.

You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.

If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.

Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index.

For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide.

Each element in the Item map is an AttributeValue object.

" }, "Expected":{ "shape":"ExpectedAttributeMap", diff --git a/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/EmptyAttributeTest.java b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/EmptyAttributeTest.java new file mode 100644 index 000000000000..eeee13ceeff7 --- /dev/null +++ b/services/dynamodb/src/test/java/software/amazon/awssdk/services/dynamodb/EmptyAttributeTest.java @@ -0,0 +1,334 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.dynamodb; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.equalTo; +import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; +import static com.github.tomakehurst.wiremock.client.WireMock.verify; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.Collections; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; +import software.amazon.awssdk.services.dynamodb.model.GetRecordsResponse; +import software.amazon.awssdk.services.dynamodb.model.StreamRecord; +import software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsAsyncClient; +import software.amazon.awssdk.services.dynamodb.streams.DynamoDbStreamsClient; + +public class EmptyAttributeTest { + private static final AttributeValue EMPTY_STRING = AttributeValue.builder().s("").build(); + private static final AttributeValue EMPTY_BINARY = + AttributeValue.builder().b(SdkBytes.fromByteArray(new byte[]{})).build(); + + @Rule + public WireMockRule mockServer = new WireMockRule(0); + + private DynamoDbClient dynamoDbClient; + private DynamoDbAsyncClient dynamoDbAsyncClient; + private DynamoDbStreamsClient dynamoDbStreamsClient; + private 
DynamoDbStreamsAsyncClient dynamoDbStreamsAsyncClient; + + @Before + public void setup() { + dynamoDbClient = + DynamoDbClient.builder() + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"))) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); + + dynamoDbAsyncClient = + DynamoDbAsyncClient.builder() + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"))) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); + + dynamoDbStreamsClient = + DynamoDbStreamsClient.builder() + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"))) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); + + dynamoDbStreamsAsyncClient = + DynamoDbStreamsAsyncClient.builder() + .credentialsProvider( + StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"))) + .region(Region.US_WEST_2) + .endpointOverride(URI.create("http://localhost:" + mockServer.port())) + .build(); } + + @Test + public void syncClient_getItem_emptyString() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody("{\"Item\": {\"attribute\": {\"S\": \"\"}}}"))); + + GetItemResponse response = dynamoDbClient.getItem(r -> r.tableName("test")); + assertThat(response.item()).containsKey("attribute"); + AttributeValue attributeValue = response.item().get("attribute"); + assertThat(attributeValue.s()).isEmpty(); + } + + @Test + public void asyncClient_getItem_emptyString() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody("{\"Item\": {\"attribute\": {\"S\": \"\"}}}"))); + + GetItemResponse response = dynamoDbAsyncClient.getItem(r -> r.tableName("test")).join(); + assertThat(response.item()).containsKey("attribute"); + AttributeValue 
attributeValue = response.item().get("attribute"); + assertThat(attributeValue.s()).isEmpty(); + } + + @Test + public void syncClient_getItem_emptyBinary() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody("{\"Item\": {\"attribute\": {\"B\": \"\"}}}"))); + + GetItemResponse response = dynamoDbClient.getItem(r -> r.tableName("test")); + assertThat(response.item()).containsKey("attribute"); + AttributeValue attributeValue = response.item().get("attribute"); + assertThat(attributeValue.b().asByteArray()).isEmpty(); + } + + @Test + public void asyncClient_getItem_emptyBinary() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody("{\"Item\": {\"attribute\": {\"B\": \"\"}}}"))); + + GetItemResponse response = dynamoDbAsyncClient.getItem(r -> r.tableName("test")).join(); + assertThat(response.item()).containsKey("attribute"); + AttributeValue attributeValue = response.item().get("attribute"); + assertThat(attributeValue.b().asByteArray()).isEmpty(); + } + + @Test + public void syncClient_putItem_emptyString() { + stubFor(any(urlEqualTo("/")).willReturn((aResponse().withStatus(200)))); + + dynamoDbClient.putItem(r -> r.item(Collections.singletonMap("stringAttribute", EMPTY_STRING))); + + verify(postRequestedFor(urlEqualTo("/")) + .withRequestBody(equalTo("{\"Item\":{\"stringAttribute\":{\"S\":\"\"}}}"))); + } + + @Test + public void asyncClient_putItem_emptyString() { + stubFor(any(urlEqualTo("/")).willReturn((aResponse().withStatus(200)))); + + dynamoDbAsyncClient.putItem(r -> r.item(Collections.singletonMap("stringAttribute", EMPTY_STRING))).join(); + + verify(postRequestedFor(urlEqualTo("/")) + .withRequestBody(equalTo("{\"Item\":{\"stringAttribute\":{\"S\":\"\"}}}"))); + } + + @Test + public void syncClient_putItem_emptyBinary() { + stubFor(any(urlEqualTo("/")).willReturn((aResponse().withStatus(200)))); + + dynamoDbClient.putItem(r -> r.item(Collections.singletonMap("binaryAttribute", EMPTY_BINARY))); + 
+ verify(postRequestedFor(urlEqualTo("/")) + .withRequestBody(equalTo("{\"Item\":{\"binaryAttribute\":{\"B\":\"\"}}}"))); + } + + @Test + public void asyncClient_putItem_emptyStrring() { + stubFor(any(urlEqualTo("/")).willReturn((aResponse().withStatus(200)))); + + dynamoDbAsyncClient.putItem(r -> r.item(Collections.singletonMap("binaryAttribute", EMPTY_BINARY))).join(); + + verify(postRequestedFor(urlEqualTo("/")) + .withRequestBody(equalTo("{\"Item\":{\"binaryAttribute\":{\"B\":\"\"}}}"))); + } + + @Test + public void syncClient_getRecords_emptyString() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody( + "{" + + " \"NextShardIterator\": \"arn:aws:dynamodb:us-west-2:111122223333:table/Forum/stream/2015-05-20T20:51:10.252|1|AAAAAAAAAAGQBYshYDEe\",\n" + + " \"Records\": [\n" + + " {\n" + + " \"awsRegion\": \"us-west-2\",\n" + + " \"dynamodb\": {\n" + + " \"ApproximateCreationDateTime\": 1.46480431E9,\n" + + " \"Keys\": {\n" + + " \"stringKey\": {\"S\": \"DynamoDB\"}\n" + + " },\n" + + " \"NewImage\": {\n" + + " \"stringAttribute\": {\"S\": \"\"}\n" + + " },\n" + + " \"OldImage\": {\n" + + " \"stringAttribute\": {\"S\": \"\"}\n" + + " },\n" + + " \"SequenceNumber\": \"300000000000000499659\",\n" + + " \"SizeBytes\": 41,\n" + + " \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n" + + " },\n" + + " \"eventID\": \"e2fd9c34eff2d779b297b26f5fef4206\",\n" + + " \"eventName\": \"INSERT\",\n" + + " \"eventSource\": \"aws:dynamodb\",\n" + + " \"eventVersion\": \"1.0\"\n" + + " }\n" + + " ]\n" + + "}"))); + + GetRecordsResponse response = dynamoDbStreamsClient.getRecords(r -> r.shardIterator("test")); + + assertThat(response.records()).hasSize(1); + StreamRecord record = response.records().get(0).dynamodb(); + assertThat(record.oldImage()).containsEntry("stringAttribute", EMPTY_STRING); + assertThat(record.newImage()).containsEntry("stringAttribute", EMPTY_STRING); + } + + @Test + public void asyncClient_getRecords_emptyString() { + 
stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody( + "{" + + " \"NextShardIterator\": \"arn:aws:dynamodb:us-west-2:111122223333:table/Forum/stream/2015-05-20T20:51:10.252|1|AAAAAAAAAAGQBYshYDEe\",\n" + + " \"Records\": [\n" + + " {\n" + + " \"awsRegion\": \"us-west-2\",\n" + + " \"dynamodb\": {\n" + + " \"ApproximateCreationDateTime\": 1.46480431E9,\n" + + " \"Keys\": {\n" + + " \"stringKey\": {\"S\": \"DynamoDB\"}\n" + + " },\n" + + " \"NewImage\": {\n" + + " \"stringAttribute\": {\"S\": \"\"}\n" + + " },\n" + + " \"OldImage\": {\n" + + " \"stringAttribute\": {\"S\": \"\"}\n" + + " },\n" + + " \"SequenceNumber\": \"300000000000000499659\",\n" + + " \"SizeBytes\": 41,\n" + + " \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n" + + " },\n" + + " \"eventID\": \"e2fd9c34eff2d779b297b26f5fef4206\",\n" + + " \"eventName\": \"INSERT\",\n" + + " \"eventSource\": \"aws:dynamodb\",\n" + + " \"eventVersion\": \"1.0\"\n" + + " }\n" + + " ]\n" + + "}"))); + + GetRecordsResponse response = dynamoDbStreamsAsyncClient.getRecords(r -> r.shardIterator("test")).join(); + + assertThat(response.records()).hasSize(1); + StreamRecord record = response.records().get(0).dynamodb(); + assertThat(record.oldImage()).containsEntry("stringAttribute", EMPTY_STRING); + assertThat(record.newImage()).containsEntry("stringAttribute", EMPTY_STRING); + } + + @Test + public void syncClient_getRecords_emptyBinary() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody( + "{" + + " \"NextShardIterator\": \"arn:aws:dynamodb:us-west-2:111122223333:table/Forum/stream/2015-05-20T20:51:10.252|1|AAAAAAAAAAGQBYshYDEe\",\n" + + " \"Records\": [\n" + + " {\n" + + " \"awsRegion\": \"us-west-2\",\n" + + " \"dynamodb\": {\n" + + " \"ApproximateCreationDateTime\": 1.46480431E9,\n" + + " \"Keys\": {\n" + + " \"stringKey\": {\"S\": \"DynamoDB\"}\n" + + " },\n" + + " \"NewImage\": {\n" + + " \"binaryAttribute\": {\"B\": \"\"}\n" + + " },\n" + + " \"OldImage\": {\n" 
+ + " \"binaryAttribute\": {\"B\": \"\"}\n" + + " },\n" + + " \"SequenceNumber\": \"300000000000000499659\",\n" + + " \"SizeBytes\": 41,\n" + + " \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n" + + " },\n" + + " \"eventID\": \"e2fd9c34eff2d779b297b26f5fef4206\",\n" + + " \"eventName\": \"INSERT\",\n" + + " \"eventSource\": \"aws:dynamodb\",\n" + + " \"eventVersion\": \"1.0\"\n" + + " }\n" + + " ]\n" + + "}"))); + + GetRecordsResponse response = dynamoDbStreamsClient.getRecords(r -> r.shardIterator("test")); + + assertThat(response.records()).hasSize(1); + StreamRecord record = response.records().get(0).dynamodb(); + assertThat(record.oldImage()).containsEntry("binaryAttribute", EMPTY_BINARY); + assertThat(record.newImage()).containsEntry("binaryAttribute", EMPTY_BINARY); + } + + @Test + public void asyncClient_getRecords_emptyBinary() { + stubFor(any(urlEqualTo("/")) + .willReturn(aResponse().withStatus(200).withBody( + "{" + + " \"NextShardIterator\": \"arn:aws:dynamodb:us-west-2:111122223333:table/Forum/stream/2015-05-20T20:51:10.252|1|AAAAAAAAAAGQBYshYDEe\",\n" + + " \"Records\": [\n" + + " {\n" + + " \"awsRegion\": \"us-west-2\",\n" + + " \"dynamodb\": {\n" + + " \"ApproximateCreationDateTime\": 1.46480431E9,\n" + + " \"Keys\": {\n" + + " \"stringKey\": {\"S\": \"DynamoDB\"}\n" + + " },\n" + + " \"NewImage\": {\n" + + " \"binaryAttribute\": {\"B\": \"\"}\n" + + " },\n" + + " \"OldImage\": {\n" + + " \"binaryAttribute\": {\"B\": \"\"}\n" + + " },\n" + + " \"SequenceNumber\": \"300000000000000499659\",\n" + + " \"SizeBytes\": 41,\n" + + " \"StreamViewType\": \"NEW_AND_OLD_IMAGES\"\n" + + " },\n" + + " \"eventID\": \"e2fd9c34eff2d779b297b26f5fef4206\",\n" + + " \"eventName\": \"INSERT\",\n" + + " \"eventSource\": \"aws:dynamodb\",\n" + + " \"eventVersion\": \"1.0\"\n" + + " }\n" + + " ]\n" + + "}"))); + + GetRecordsResponse response = dynamoDbStreamsAsyncClient.getRecords(r -> r.shardIterator("test")).join(); + + assertThat(response.records()).hasSize(1); + 
StreamRecord record = response.records().get(0).dynamodb(); + assertThat(record.oldImage()).containsEntry("binaryAttribute", EMPTY_BINARY); + assertThat(record.newImage()).containsEntry("binaryAttribute", EMPTY_BINARY); + } +} diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 767950db3203..c335fd2e17e1 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ebs/src/main/resources/codegen-resources/service-2.json b/services/ebs/src/main/resources/codegen-resources/service-2.json index c562c9fbbe7a..5816fa30b957 100644 --- a/services/ebs/src/main/resources/codegen-resources/service-2.json +++ b/services/ebs/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,25 @@ "uid":"ebs-2019-11-02" }, "operations":{ + "CompleteSnapshot":{ + "name":"CompleteSnapshot", + "http":{ + "method":"POST", + "requestUri":"/snapshots/completion/{snapshotId}", + "responseCode":202 + }, + "input":{"shape":"CompleteSnapshotRequest"}, + "output":{"shape":"CompleteSnapshotResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.

" + }, "GetSnapshotBlock":{ "name":"GetSnapshotBlock", "http":{ @@ -21,8 +40,12 @@ "input":{"shape":"GetSnapshotBlockRequest"}, "output":{"shape":"GetSnapshotBlockResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} ], "documentation":"

Returns the data in a block in an Amazon Elastic Block Store snapshot.

" }, @@ -35,8 +58,12 @@ "input":{"shape":"ListChangedBlocksRequest"}, "output":{"shape":"ListChangedBlocksResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} ], "documentation":"

Returns the block indexes and block tokens for blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.

" }, @@ -49,13 +76,79 @@ "input":{"shape":"ListSnapshotBlocksRequest"}, "output":{"shape":"ListSnapshotBlocksResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} ], "documentation":"

Returns the block indexes and block tokens for blocks in an Amazon Elastic Block Store snapshot.

" + }, + "PutSnapshotBlock":{ + "name":"PutSnapshotBlock", + "http":{ + "method":"PUT", + "requestUri":"/snapshots/{snapshotId}/blocks/{blockIndex}", + "responseCode":201 + }, + "input":{"shape":"PutSnapshotBlockRequest"}, + "output":{"shape":"PutSnapshotBlockResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Writes a block of data to a block in the snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.

Data written to a snapshot must be aligned with 512-byte sectors.

", + "authtype":"v4-unsigned-body" + }, + "StartSnapshot":{ + "name":"StartSnapshot", + "http":{ + "method":"POST", + "requestUri":"/snapshots", + "responseCode":201 + }, + "input":{"shape":"StartSnapshotRequest"}, + "output":{"shape":"StartSnapshotResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"RequestThrottledException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConcurrentLimitExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.

After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.

" } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Reason"], + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"AccessDeniedExceptionReason", + "documentation":"

The reason for the exception.

" + } + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AccessDeniedExceptionReason":{ + "type":"string", + "enum":[ + "UNAUTHORIZED_ACCOUNT", + "DEPENDENCY_ACCESS_DENIED" + ] + }, "Block":{ "type":"structure", "members":{ @@ -75,7 +168,10 @@ "sensitive":true, "streaming":true }, - "BlockIndex":{"type":"integer"}, + "BlockIndex":{ + "type":"integer", + "min":0 + }, "BlockSize":{"type":"integer"}, "BlockToken":{ "type":"string", @@ -87,6 +183,7 @@ "member":{"shape":"Block"}, "sensitive":true }, + "Boolean":{"type":"boolean"}, "ChangedBlock":{ "type":"structure", "members":{ @@ -110,16 +207,99 @@ "type":"list", "member":{"shape":"ChangedBlock"} }, + "ChangedBlocksCount":{ + "type":"integer", + "min":0 + }, "Checksum":{ "type":"string", - "max":64 + "max":64, + "pattern":"^[A-Za-z0-9+/=]+$" + }, + "ChecksumAggregationMethod":{ + "type":"string", + "enum":["LINEAR"], + "max":32, + "pattern":"^[A-Za-z0-9]+$" }, "ChecksumAlgorithm":{ "type":"string", "enum":["SHA256"], - "max":32 + "max":32, + "pattern":"^[A-Za-z0-9]+$" + }, + "CompleteSnapshotRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "ChangedBlocksCount" + ], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The ID of the snapshot.

", + "location":"uri", + "locationName":"snapshotId" + }, + "ChangedBlocksCount":{ + "shape":"ChangedBlocksCount", + "documentation":"

The number of blocks that were written to the snapshot.

", + "location":"header", + "locationName":"x-amz-ChangedBlocksCount" + }, + "Checksum":{ + "shape":"Checksum", + "documentation":"

An aggregated Base-64 SHA256 checksum based on the checksums of each written block.

To generate the aggregated checksum using the linear aggregation method, arrange the checksums for each written block in ascending order of their block index, concatenate them to form a single string, and then generate the checksum on the entire string using the SHA256 algorithm.

", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

The algorithm used to generate the checksum. Currently, the only supported algorithm is SHA256.

", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + }, + "ChecksumAggregationMethod":{ + "shape":"ChecksumAggregationMethod", + "documentation":"

The aggregation method used to generate the checksum. Currently, the only supported aggregation method is LINEAR.

", + "location":"header", + "locationName":"x-amz-Checksum-Aggregation-Method" + } + } + }, + "CompleteSnapshotResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

The status of the snapshot.

" + } + } + }, + "ConcurrentLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

You have reached the limit for concurrent API requests. For more information, see Optimizing performance of the EBS direct APIs in the Amazon Elastic Compute Cloud User Guide.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request uses the same client token as a previous, but non-identical request.

", + "error":{"httpStatusCode":503}, + "exception":true }, "DataLength":{"type":"integer"}, + "Description":{ + "type":"string", + "max":255, + "pattern":"^[\\S\\s]+$" + }, "ErrorMessage":{ "type":"string", "max":256 @@ -180,6 +360,28 @@ }, "payload":"BlockData" }, + "IdempotencyToken":{ + "type":"string", + "max":255, + "pattern":"^[\\S]+$" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An internal error has occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws[a-z\\-]*:kms:.*:[0-9]{12}:key/.*", + "sensitive":true + }, "ListChangedBlocksRequest":{ "type":"structure", "required":["SecondSnapshotId"], @@ -301,27 +503,283 @@ "max":10000, "min":100 }, + "OwnerId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"\\S+" + }, "PageToken":{ "type":"string", "max":256, "pattern":"^[A-Za-z0-9+/=]+$" }, + "Progress":{ + "type":"integer", + "max":100, + "min":0 + }, + "PutSnapshotBlockRequest":{ + "type":"structure", + "required":[ + "SnapshotId", + "BlockIndex", + "BlockData", + "DataLength", + "Checksum", + "ChecksumAlgorithm" + ], + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The ID of the snapshot.

", + "location":"uri", + "locationName":"snapshotId" + }, + "BlockIndex":{ + "shape":"BlockIndex", + "documentation":"

The block index of the block in which to write the data. A block index is the offset position of a block within a snapshot, and it is used to identify the block. To identify the logical offset of the data in the logical volume, multiply the block index with the block size (Block index * 512 bytes).

", + "location":"uri", + "locationName":"blockIndex" + }, + "BlockData":{ + "shape":"BlockData", + "documentation":"

The data to write to the block.

The block data is not signed as part of the Signature Version 4 signing process. As a result, you must generate and provide a Base64-encoded SHA256 checksum for the block data using the x-amz-Checksum header. Also, you must specify the checksum algorithm using the x-amz-Checksum-Algorithm header. The checksum that you provide is part of the Signature Version 4 signing process. It is validated against a checksum generated by Amazon EBS to ensure the validity and authenticity of the data. If the checksums do not correspond, the request fails. For more information, see Using checksums with the EBS direct APIs in the Amazon Elastic Compute Cloud User Guide.

" + }, + "DataLength":{ + "shape":"DataLength", + "documentation":"

The size of the data to write to the block, in bytes. Currently, the only supported size is 524288.

Valid values: 524288

", + "location":"header", + "locationName":"x-amz-Data-Length" + }, + "Progress":{ + "shape":"Progress", + "documentation":"

The progress of the write process, as a percentage.

", + "location":"header", + "locationName":"x-amz-Progress" + }, + "Checksum":{ + "shape":"Checksum", + "documentation":"

A Base64-encoded SHA256 checksum of the data. Only SHA256 checksums are supported.

", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

The algorithm used to generate the checksum. Currently, the only supported algorithm is SHA256.

", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + } + }, + "payload":"BlockData" + }, + "PutSnapshotBlockResponse":{ + "type":"structure", + "members":{ + "Checksum":{ + "shape":"Checksum", + "documentation":"

The SHA256 checksum generated for the block data by Amazon EBS.

", + "location":"header", + "locationName":"x-amz-Checksum" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

The algorithm used by Amazon EBS to generate the checksum.

", + "location":"header", + "locationName":"x-amz-Checksum-Algorithm" + } + } + }, + "RequestThrottledException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"RequestThrottledExceptionReason", + "documentation":"

The reason for the exception.

" + } + }, + "documentation":"

The number of API requests has exceeded the maximum allowed API request throttling limit.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "RequestThrottledExceptionReason":{ + "type":"string", + "enum":[ + "ACCOUNT_THROTTLED", + "DEPENDENCY_REQUEST_THROTTLED" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ - "Message":{"shape":"ErrorMessage"} + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"ResourceNotFoundExceptionReason", + "documentation":"

The reason for the exception.

" + } }, "documentation":"

The specified resource does not exist.

", "error":{"httpStatusCode":404}, "exception":true }, + "ResourceNotFoundExceptionReason":{ + "type":"string", + "enum":[ + "SNAPSHOT_NOT_FOUND", + "DEPENDENCY_RESOURCE_NOT_FOUND" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "Reason":{ + "shape":"ServiceQuotaExceededExceptionReason", + "documentation":"

The reason for the exception.

" + } + }, + "documentation":"

Your current service quotas do not allow you to perform this action.

", + "error":{"httpStatusCode":402}, + "exception":true + }, + "ServiceQuotaExceededExceptionReason":{ + "type":"string", + "enum":["DEPENDENCY_SERVICE_QUOTA_EXCEEDED"] + }, "SnapshotId":{ "type":"string", "max":64, "min":1, "pattern":"^snap-[0-9a-f]+$" }, + "StartSnapshotRequest":{ + "type":"structure", + "required":["VolumeSize"], + "members":{ + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

The size of the volume, in GiB. The maximum size is 16384 GiB (16 TiB).

" + }, + "ParentSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The ID of the parent snapshot. If there is no parent snapshot, or if you are creating the first snapshot for an on-premises volume, omit this parameter.

If your account is enabled for encryption by default, you cannot use an unencrypted snapshot as a parent snapshot. You must first create an encrypted copy of the parent snapshot using CopySnapshot.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to apply to the snapshot.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description for the snapshot.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect.

If you do not specify a client token, one is automatically generated by the AWS SDK.

For more information, see Idempotency for StartSnapshot API in the Amazon Elastic Compute Cloud User Guide.

", + "idempotencyToken":true + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

Indicates whether to encrypt the snapshot. To create an encrypted snapshot, specify true. To create an unencrypted snapshot, omit this parameter.

If you specify a value for ParentSnapshotId, omit this parameter.

If you specify true, the snapshot is encrypted using the CMK specified using the KmsKeyArn parameter. If no value is specified for KmsKeyArn, the default CMK for your account is used. If no default CMK has been specified for your account, the AWS managed CMK is used. To set a default CMK for your account, use ModifyEbsDefaultKmsKeyId.

If your account is enabled for encryption by default, you cannot set this parameter to false. In this case, you can omit this parameter.

For more information, see Using encryption in the Amazon Elastic Compute Cloud User Guide.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) to be used to encrypt the snapshot. If you do not specify a CMK, the default AWS managed CMK is used.

If you specify a ParentSnapshotId, omit this parameter; the snapshot will be encrypted using the same CMK that was used to encrypt the parent snapshot.

If Encrypted is set to true, you must specify a CMK ARN.

" + }, + "Timeout":{ + "shape":"Timeout", + "documentation":"

The amount of time (in minutes) after which the snapshot is automatically cancelled if:

  • No blocks are written to the snapshot.

  • The snapshot is not completed after writing the last block of data.

If no value is specified, the timeout defaults to 60 minutes.

" + } + } + }, + "StartSnapshotResponse":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"Description", + "documentation":"

The description of the snapshot.

" + }, + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The ID of the snapshot.

" + }, + "OwnerId":{ + "shape":"OwnerId", + "documentation":"

The AWS account ID of the snapshot owner.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the snapshot.

" + }, + "StartTime":{ + "shape":"TimeStamp", + "documentation":"

The timestamp when the snapshot was created.

" + }, + "VolumeSize":{ + "shape":"VolumeSize", + "documentation":"

The size of the volume, in GiB.

" + }, + "BlockSize":{ + "shape":"BlockSize", + "documentation":"

The size of the blocks in the snapshot, in bytes.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags applied to the snapshot. You can specify up to 50 tags per snapshot. For more information, see Tagging your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide.

" + }, + "ParentSnapshotId":{ + "shape":"SnapshotId", + "documentation":"

The ID of the parent snapshot.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) customer master key (CMK) used to encrypt the snapshot.

" + } + } + }, + "Status":{ + "type":"string", + "enum":[ + "completed", + "pending", + "error" + ], + "max":32 + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag.

" + } + }, + "documentation":"

Describes a tag.

" + }, + "TagKey":{ + "type":"string", + "max":127, + "pattern":"^[\\S\\s]+$" + }, + "TagValue":{ + "type":"string", + "max":255, + "pattern":"^[\\S\\s]+$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"} + }, "TimeStamp":{"type":"timestamp"}, + "Timeout":{ + "type":"integer", + "max":60, + "min":10 + }, "ValidationException":{ "type":"structure", "members":{ @@ -342,10 +800,19 @@ "INVALID_PAGE_TOKEN", "INVALID_BLOCK_TOKEN", "INVALID_SNAPSHOT_ID", - "UNRELATED_SNAPSHOTS" + "UNRELATED_SNAPSHOTS", + "INVALID_BLOCK", + "INVALID_CONTENT_ENCODING", + "INVALID_TAG", + "INVALID_DEPENDENCY_REQUEST", + "INVALID_PARAMETER_VALUE", + "INVALID_VOLUME_SIZE" ] }, - "VolumeSize":{"type":"long"} + "VolumeSize":{ + "type":"long", + "min":1 + } }, - "documentation":"

You can use the Amazon Elastic Block Store (EBS) direct APIs to directly read the data on your EBS snapshots, and identify the difference between two snapshots. You can view the details of blocks in an EBS snapshot, compare the block difference between two snapshots, and directly access the data in a snapshot. If you’re an independent software vendor (ISV) who offers backup services for EBS, the EBS direct APIs make it easier and more cost-effective to track incremental changes on your EBS volumes via EBS snapshots. This can be done without having to create new volumes from EBS snapshots.

This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported AWS Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the AWS General Reference.

" + "documentation":"

You can use the Amazon Elastic Block Store (EBS) direct APIs to directly read the data on your EBS snapshots, and identify the difference between two snapshots. You can view the details of blocks in an EBS snapshot, compare the block difference between two snapshots, and directly access the data in a snapshot. If you're an independent software vendor (ISV) who offers backup services for EBS, the EBS direct APIs make it easier and more cost-effective to track incremental changes on your EBS volumes via EBS snapshots. This can be done without having to create new volumes from EBS snapshots.

This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported AWS Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the AWS General Reference.

" } diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index fc5435f114a2..eca7a4879129 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/paginators-1.json b/services/ec2/src/main/resources/codegen-resources/paginators-1.json index c6fa9c83291c..0cb7eb79ee45 100755 --- a/services/ec2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ec2/src/main/resources/codegen-resources/paginators-1.json @@ -249,6 +249,12 @@ "output_token": "NextToken", "result_key": "LocalGateways" }, + "DescribeManagedPrefixLists": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PrefixLists" + }, "DescribeMovingAddresses": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -518,6 +524,18 @@ "output_token": "NextToken", "result_key": "Ipv6CidrAssociations" }, + "GetManagedPrefixListAssociations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "PrefixListAssociations" + }, + "GetManagedPrefixListEntries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Entries" + }, "GetTransitGatewayAttachmentPropagations": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 21b7803c1fdf..61b43bc8bbd7 100755 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -484,7 +484,7 @@ }, "input":{"shape":"CreateDhcpOptionsRequest"}, "output":{"shape":"CreateDhcpOptionsResult"}, - "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, MyCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

  • domain-name-servers - The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. If specifying more than one domain name server, specify the IP addresses in a single parameter, separated by commas. To have your instance receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers to a custom DNS server.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in another Region, specify region.compute.internal (for example, ap-northeast-1.compute.internal). Otherwise, specify a domain name (for example, ExampleCompany.com). This value is used to complete unqualified DNS hostnames. Important: Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.

  • ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) servers.

  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.

  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported). For more information about these node types, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateEgressOnlyInternetGateway":{ "name":"CreateEgressOnlyInternetGateway", @@ -544,7 +544,7 @@ }, "input":{"shape":"CreateInstanceExportTaskRequest"}, "output":{"shape":"CreateInstanceExportTaskResult"}, - "documentation":"

Exports a running or stopped instance to an S3 bucket.

For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting an Instance as a VM Using VM Import/Export in the VM Import/Export User Guide.

" + "documentation":"

Exports a running or stopped instance to an Amazon S3 bucket.

For information about the supported operating systems, image formats, and known limitations for the types of instances you can export, see Exporting an Instance as a VM Using VM Import/Export in the VM Import/Export User Guide.

" }, "CreateInternetGateway":{ "name":"CreateInternetGateway", @@ -574,7 +574,7 @@ }, "input":{"shape":"CreateLaunchTemplateRequest"}, "output":{"shape":"CreateLaunchTemplateResult"}, - "documentation":"

Creates a launch template. A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request.

" + "documentation":"

Creates a launch template. A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launching an instance from a launch template in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateLaunchTemplateVersion":{ "name":"CreateLaunchTemplateVersion", @@ -584,7 +584,7 @@ }, "input":{"shape":"CreateLaunchTemplateVersionRequest"}, "output":{"shape":"CreateLaunchTemplateVersionResult"}, - "documentation":"

Creates a new version for a launch template. You can specify an existing version of launch template from which to base the new version.

Launch template versions are numbered in the order in which they are created. You cannot specify, change, or replace the numbering of launch template versions.

" + "documentation":"

Creates a new version for a launch template. You can specify an existing version of launch template from which to base the new version.

Launch template versions are numbered in the order in which they are created. You cannot specify, change, or replace the numbering of launch template versions.

For more information, see Managing launch template versions in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateLocalGatewayRoute":{ "name":"CreateLocalGatewayRoute", @@ -606,6 +606,16 @@ "output":{"shape":"CreateLocalGatewayRouteTableVpcAssociationResult"}, "documentation":"

Associates the specified VPC with the specified local gateway route table.

" }, + "CreateManagedPrefixList":{ + "name":"CreateManagedPrefixList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateManagedPrefixListRequest"}, + "output":{"shape":"CreateManagedPrefixListResult"}, + "documentation":"

Creates a managed prefix list. You can specify one or more entries for the prefix list. Each entry consists of a CIDR block and an optional description.

You must specify the maximum number of entries for the prefix list. The maximum number of entries cannot be changed later.

" + }, "CreateNatGateway":{ "name":"CreateNatGateway", "http":{ @@ -662,7 +672,8 @@ "requestUri":"/" }, "input":{"shape":"CreatePlacementGroupRequest"}, - "documentation":"

Creates a placement group in which to launch instances. The strategy of the placement group determines how the instances are organized within the group.

A cluster placement group is a logical grouping of instances within a single Availability Zone that benefit from low network latency, high network throughput. A spread placement group places instances on distinct hardware. A partition placement group places groups of instances in different partitions, where instances in one partition do not share the same hardware with instances in another partition.

For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "output":{"shape":"CreatePlacementGroupResult"}, + "documentation":"

Creates a placement group in which to launch instances. The strategy of the placement group determines how the instances are organized within the group.

A cluster placement group is a logical grouping of instances within a single Availability Zone that benefit from low network latency, high network throughput. A spread placement group places instances on distinct hardware. A partition placement group places groups of instances in different partitions, where instances in one partition do not share the same hardware with instances in another partition.

For more information, see Placement groups in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateReservedInstancesListing":{ "name":"CreateReservedInstancesListing", @@ -732,7 +743,7 @@ }, "input":{"shape":"CreateSpotDatafeedSubscriptionRequest"}, "output":{"shape":"CreateSpotDatafeedSubscriptionResult"}, - "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per AWS account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" }, "CreateSubnet":{ "name":"CreateSubnet", @@ -742,7 +753,7 @@ }, "input":{"shape":"CreateSubnetRequest"}, "output":{"shape":"CreateSubnetResult"}, - "documentation":"

Creates a subnet in an existing VPC.

When you create each subnet, you provide the VPC ID and IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses).

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP address doesn't change if you stop and restart the instance (unlike a similar instance launched outside a VPC, which gets a new IP address when restarted). It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Creates a subnet in a specified VPC.

You must specify an IPv4 CIDR block for the subnet. After you create a subnet, you can't change its CIDR block. The allowed block size is between a /16 netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR block must not overlap with the CIDR block of an existing subnet in the VPC.

If you've associated an IPv6 CIDR block with your VPC, you can create a subnet with an IPv6 CIDR block that uses a /64 prefix length.

AWS reserves both the first four and the last IPv4 address in each subnet's CIDR block. They're not available for use.

If you add more than one subnet to a VPC, they're set up in a star topology with a logical router in the middle.

When you stop an instance in a subnet, it retains its private IPv4 address. It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available.

For more information about subnets, see Your VPC and Subnets in the Amazon Virtual Private Cloud User Guide.

" }, "CreateTags":{ "name":"CreateTags", @@ -751,7 +762,7 @@ "requestUri":"/" }, "input":{"shape":"CreateTagsRequest"}, - "documentation":"

Adds or overwrites the specified tags for the specified Amazon EC2 resource or resources. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Adds or overwrites only the specified tags for the specified Amazon EC2 resource or resources. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value. Tag keys must be unique per resource.

For more information about tags, see Tagging Your Resources in the Amazon Elastic Compute Cloud User Guide. For more information about creating IAM policies that control users' access to resources based on tags, see Supported Resource-Level Permissions for Amazon EC2 API Actions in the Amazon Elastic Compute Cloud User Guide.

" }, "CreateTrafficMirrorFilter":{ "name":"CreateTrafficMirrorFilter", @@ -1078,6 +1089,16 @@ "output":{"shape":"DeleteLocalGatewayRouteTableVpcAssociationResult"}, "documentation":"

Deletes the specified association between a VPC and local gateway route table.

" }, + "DeleteManagedPrefixList":{ + "name":"DeleteManagedPrefixList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteManagedPrefixListRequest"}, + "output":{"shape":"DeleteManagedPrefixListResult"}, + "documentation":"

Deletes the specified managed prefix list. You must first remove all references to the prefix list in your resources.

" + }, "DeleteNatGateway":{ "name":"DeleteNatGateway", "http":{ @@ -1132,7 +1153,7 @@ "requestUri":"/" }, "input":{"shape":"DeletePlacementGroupRequest"}, - "documentation":"

Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information, see Placement groups in the Amazon Elastic Compute Cloud User Guide.

" }, "DeleteQueuedReservedInstances":{ "name":"DeleteQueuedReservedInstances", @@ -1372,7 +1393,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteVpnConnectionRequest"}, - "documentation":"

Deletes the specified VPN connection.

If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway using the new configuration information returned with the new VPN connection ID.

" + "documentation":"

Deletes the specified VPN connection.

If you're deleting the VPC and its associated components, we recommend that you detach the virtual private gateway from the VPC and delete the VPC before deleting the VPN connection. If you believe that the tunnel credentials for your VPN connection have been compromised, you can delete the VPN connection and create a new one that has new keys, without needing to delete the VPC or virtual private gateway. If you create a new VPN connection, you must reconfigure the customer gateway device using the new configuration information returned with the new VPN connection ID.

For certificate-based authentication, delete all AWS Certificate Manager (ACM) private certificates used for the AWS-side tunnel endpoints for the VPN connection before deleting the VPN connection.

" }, "DeleteVpnConnectionRoute":{ "name":"DeleteVpnConnectionRoute", @@ -1411,6 +1432,16 @@ "input":{"shape":"DeregisterImageRequest"}, "documentation":"

Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances; however, it doesn't affect any instances that you've already launched from the AMI. You'll continue to incur usage costs for those instances until you terminate them.

When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was created for the root volume of the instance during the AMI creation process. When you deregister an instance store-backed AMI, it doesn't affect the files that you uploaded to Amazon S3 when you created the AMI.

" }, + "DeregisterInstanceEventNotificationAttributes":{ + "name":"DeregisterInstanceEventNotificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterInstanceEventNotificationAttributesRequest"}, + "output":{"shape":"DeregisterInstanceEventNotificationAttributesResult"}, + "documentation":"

Deregisters tag keys to prevent tags that have the specified tag keys from being included in scheduled event notifications for resources in the Region.

" + }, "DeregisterTransitGatewayMulticastGroupMembers":{ "name":"DeregisterTransitGatewayMulticastGroupMembers", "http":{ @@ -1629,7 +1660,7 @@ }, "input":{"shape":"DescribeExportImageTasksRequest"}, "output":{"shape":"DescribeExportImageTasksResult"}, - "documentation":"

Describes the specified export image tasks or all your export image tasks.

" + "documentation":"

Describes the specified export image tasks or all of your export image tasks.

" }, "DescribeExportTasks":{ "name":"DescribeExportTasks", @@ -1639,7 +1670,7 @@ }, "input":{"shape":"DescribeExportTasksRequest"}, "output":{"shape":"DescribeExportTasksResult"}, - "documentation":"

Describes the specified export instance tasks or all your export instance tasks.

" + "documentation":"

Describes the specified export instance tasks or all of your export instance tasks.

" }, "DescribeFastSnapshotRestores":{ "name":"DescribeFastSnapshotRestores", @@ -1829,7 +1860,17 @@ }, "input":{"shape":"DescribeInstanceCreditSpecificationsRequest"}, "output":{"shape":"DescribeInstanceCreditSpecificationsResult"}, - "documentation":"

Describes the credit option for CPU usage of the specified burstable performance instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns burstable performance instances with the unlimited credit option, as well as instances that were previously configured as T2, T3, and T3a with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance.

If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a burstable performance instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the credit option for CPU usage of the specified burstable performance instances. The credit options are standard and unlimited.

If you do not specify an instance ID, Amazon EC2 returns burstable performance instances with the unlimited credit option, as well as instances that were previously configured as T2, T3, and T3a with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance.

If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a burstable performance instance, an error is returned.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally.

For more information, see Burstable performance instances in the Amazon Elastic Compute Cloud User Guide.

" + }, + "DescribeInstanceEventNotificationAttributes":{ + "name":"DescribeInstanceEventNotificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceEventNotificationAttributesRequest"}, + "output":{"shape":"DescribeInstanceEventNotificationAttributesResult"}, + "documentation":"

Describes the tag keys that are registered to appear in scheduled event notifications for resources in the current Region.

" }, "DescribeInstanceStatus":{ "name":"DescribeInstanceStatus", @@ -1839,7 +1880,7 @@ }, "input":{"shape":"DescribeInstanceStatusRequest"}, "output":{"shape":"DescribeInstanceStatusResult"}, - "documentation":"

Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

  • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status Checks for Your Instances and Troubleshooting Instances with Failed Status Checks in the Amazon Elastic Compute Cloud User Guide.

  • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled Events for Your Instances in the Amazon Elastic Compute Cloud User Guide.

  • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

  • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status checks for your instances and Troubleshooting instances with failed status checks in the Amazon Elastic Compute Cloud User Guide.

  • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled events for your instances in the Amazon Elastic Compute Cloud User Guide.

  • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance lifecycle in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeInstanceTypeOfferings":{ "name":"DescribeInstanceTypeOfferings", @@ -1859,7 +1900,7 @@ }, "input":{"shape":"DescribeInstanceTypesRequest"}, "output":{"shape":"DescribeInstanceTypesResult"}, - "documentation":"

Returns a list of all instance types offered in your current AWS Region. The results can be filtered by the attributes of the instance types.

" + "documentation":"

Describes the details of the instance types that are offered in a location. The results can be filtered by the attributes of the instance types.

" }, "DescribeInstances":{ "name":"DescribeInstances", @@ -1869,7 +1910,7 @@ }, "input":{"shape":"DescribeInstancesRequest"}, "output":{"shape":"DescribeInstancesResult"}, - "documentation":"

Describes the specified instances or all of AWS account's instances.

If you specify one or more instance IDs, Amazon EC2 returns information for those instances. If you do not specify instance IDs, Amazon EC2 returns information for all relevant instances. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the returned results.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

" + "documentation":"

Describes the specified instances or all instances.

If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance. We recommend that you use pagination to ensure that the operation returns quickly and successfully.

If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the output.

Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

" }, "DescribeInternetGateways":{ "name":"DescribeInternetGateways", @@ -1981,6 +2022,16 @@ "output":{"shape":"DescribeLocalGatewaysResult"}, "documentation":"

Describes one or more local gateways. By default, all local gateways are described. Alternatively, you can filter the results.

" }, + "DescribeManagedPrefixLists":{ + "name":"DescribeManagedPrefixLists", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeManagedPrefixListsRequest"}, + "output":{"shape":"DescribeManagedPrefixListsResult"}, + "documentation":"

Describes your managed prefix lists and any AWS-managed prefix lists.

To view the entries for your prefix list, use GetManagedPrefixListEntries.

" + }, "DescribeMovingAddresses":{ "name":"DescribeMovingAddresses", "http":{ @@ -2049,7 +2100,7 @@ }, "input":{"shape":"DescribePlacementGroupsRequest"}, "output":{"shape":"DescribePlacementGroupsResult"}, - "documentation":"

Describes the specified placement groups or all of your placement groups. For more information, see Placement Groups in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified placement groups or all of your placement groups. For more information, see Placement groups in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribePrefixLists":{ "name":"DescribePrefixLists", @@ -2059,7 +2110,7 @@ }, "input":{"shape":"DescribePrefixListsRequest"}, "output":{"shape":"DescribePrefixListsResult"}, - "documentation":"

Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service. A prefix list ID is required for creating an outbound security group rule that allows traffic from a VPC to access an AWS service through a gateway VPC endpoint. Currently, the services that support this action are Amazon S3 and Amazon DynamoDB.

" + "documentation":"

Describes available AWS services in a prefix list format, which includes the prefix list name and prefix list ID of the service and the IP address range for the service.

We recommend that you use DescribeManagedPrefixLists instead.

" }, "DescribePrincipalIdFormat":{ "name":"DescribePrincipalIdFormat", @@ -2199,7 +2250,7 @@ }, "input":{"shape":"DescribeSnapshotsRequest"}, "output":{"shape":"DescribeSnapshotsResult"}, - "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

  • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS snapshots available to you or all of the EBS snapshots available to you.

The snapshots available to you include public snapshots, private snapshots that you own, and private snapshots owned by other AWS accounts for which you have explicit create volume permissions.

The create volume permissions fall into the following categories:

  • public: The owner of the snapshot granted create volume permissions for the snapshot to the all group. All AWS accounts have create volume permissions for these snapshots.

  • explicit: The owner of the snapshot granted create volume permissions to a specific AWS account.

  • implicit: An AWS account has implicit create volume permissions for all snapshots it owns.

The list of snapshots returned can be filtered by specifying snapshot IDs, snapshot owners, or AWS accounts with create volume permissions. If no options are specified, Amazon EC2 returns all snapshots for which you have create volume permissions.

If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. If you specify an invalid snapshot ID, an error is returned. If you specify a snapshot ID for which you do not have access, it is not included in the returned results.

If you specify one or more snapshot owners using the OwnerIds option, only snapshots from the specified owners and for which you have access are returned. The results can include the AWS account IDs of the specified owners, amazon for snapshots owned by Amazon, or self for snapshots that you own.

If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are returned. You can specify AWS account IDs (if you own the snapshots), self for snapshots for which you own or have explicit permissions, or all for public snapshots.

If you are describing a long list of snapshots, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSnapshots request to retrieve the remaining results.

To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores.

For more information about EBS snapshots, see Amazon EBS Snapshots in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeSpotDatafeedSubscription":{ "name":"DescribeSpotDatafeedSubscription", @@ -2209,7 +2260,7 @@ }, "input":{"shape":"DescribeSpotDatafeedSubscriptionRequest"}, "output":{"shape":"DescribeSpotDatafeedSubscriptionResult"}, - "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance Data Feed in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Describes the data feed for Spot Instances. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances.

" }, "DescribeSpotFleetInstances":{ "name":"DescribeSpotFleetInstances", @@ -2249,7 +2300,7 @@ }, "input":{"shape":"DescribeSpotInstanceRequestsRequest"}, "output":{"shape":"DescribeSpotInstanceRequestsResult"}, - "documentation":"

Describes the specified Spot Instance requests.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

We recommend that you set MaxResults to a value between 5 and 1000 to limit the number of results returned. This paginates the output, which makes the list more manageable and returns the results faster. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests request to retrieve the remaining results.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" + "documentation":"

Describes the specified Spot Instance requests.

You can use DescribeSpotInstanceRequests to find a running Spot Instance by examining the response. If the status of the Spot Instance is fulfilled, the instance ID appears in the response and contains the identifier of the instance. Alternatively, you can use DescribeInstances with a filter to look for instances where the instance lifecycle is spot.

We recommend that you set MaxResults to a value between 5 and 1000 to limit the number of results returned. This paginates the output, which makes the list more manageable and returns the results faster. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeSpotInstanceRequests request to retrieve the remaining results.

Spot Instance requests are deleted four hours after they are canceled and their instances are terminated.

" }, "DescribeSpotPriceHistory":{ "name":"DescribeSpotPriceHistory", @@ -2259,7 +2310,7 @@ }, "input":{"shape":"DescribeSpotPriceHistoryRequest"}, "output":{"shape":"DescribeSpotPriceHistoryResult"}, - "documentation":"

Describes the Spot price history. For more information, see Spot Instance Pricing History in the Amazon EC2 User Guide for Linux Instances.

When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

" + "documentation":"

Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide for Linux Instances.

When you specify a start and end time, this operation returns the prices of the instance types within the time range that you specified and the time when the price changed. The price is valid within the time period that you specified; the response merely indicates the last time that the price changed.

" }, "DescribeStaleSecurityGroups":{ "name":"DescribeStaleSecurityGroups", @@ -2409,7 +2460,7 @@ }, "input":{"shape":"DescribeVolumesRequest"}, "output":{"shape":"DescribeVolumesResult"}, - "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, you can paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the specified EBS volumes or all of your EBS volumes.

If you are describing a long list of volumes, we recommend that you paginate the output to make the list more manageable. The MaxResults parameter sets the maximum number of results returned in a single page. If the list of results exceeds your MaxResults value, then that number of results is returned along with a NextToken value that can be passed to a subsequent DescribeVolumes request to retrieve the remaining results.

For more information about EBS volumes, see Amazon EBS Volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVolumesModifications":{ "name":"DescribeVolumesModifications", @@ -2419,7 +2470,7 @@ }, "input":{"shape":"DescribeVolumesModificationsRequest"}, "output":{"shape":"DescribeVolumesModificationsResult"}, - "documentation":"

Reports the current modification status of EBS volumes.

Current-generation EBS volumes support modification of attributes including type, size, and (for io1 volumes) IOPS provisioning while either attached to or detached from an instance. Following an action from the API or the console to modify a volume, the status of the modification may be modifying, optimizing, completed, or failed. If a volume has never been modified, then certain elements of the returned VolumeModification objects are null.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications\" in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the most recent volume modification request for the specified EBS volumes.

If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request.

You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitoring Volume Modifications in the Amazon Elastic Compute Cloud User Guide.

" }, "DescribeVpcAttribute":{ "name":"DescribeVpcAttribute", @@ -2655,7 +2706,7 @@ }, "input":{"shape":"DisableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"DisableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP addresses when addressed between a linked EC2-Classic instance and instances in the VPC to which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

You must specify a VPC ID in the request.

" }, "DisassociateAddress":{ "name":"DisassociateAddress", @@ -2693,7 +2744,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateRouteTableRequest"}, - "documentation":"

Disassociates a subnet from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" + "documentation":"

Disassociates a subnet or gateway from a route table.

After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route Tables in the Amazon Virtual Private Cloud User Guide.

" }, "DisassociateSubnetCidrBlock":{ "name":"DisassociateSubnetCidrBlock", @@ -2801,7 +2852,7 @@ }, "input":{"shape":"EnableVpcClassicLinkDnsSupportRequest"}, "output":{"shape":"EnableVpcClassicLinkDnsSupportResult"}, - "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when addressed from a linked EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute Cloud User Guide.

You must specify a VPC ID in the request.

" }, "ExportClientVpnClientCertificateRevocationList":{ "name":"ExportClientVpnClientCertificateRevocationList", @@ -2901,7 +2952,7 @@ }, "input":{"shape":"GetDefaultCreditSpecificationRequest"}, "output":{"shape":"GetDefaultCreditSpecificationResult"}, - "documentation":"

Describes the default credit option for CPU usage of a burstable performance instance family.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Describes the default credit option for CPU usage of a burstable performance instance family.

For more information, see Burstable performance instances in the Amazon Elastic Compute Cloud User Guide.

" }, "GetEbsDefaultKmsKeyId":{ "name":"GetEbsDefaultKmsKeyId", @@ -2943,6 +2994,26 @@ "output":{"shape":"GetLaunchTemplateDataResult"}, "documentation":"

Retrieves the configuration data of the specified instance. You can use this data to create a launch template.

" }, + "GetManagedPrefixListAssociations":{ + "name":"GetManagedPrefixListAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetManagedPrefixListAssociationsRequest"}, + "output":{"shape":"GetManagedPrefixListAssociationsResult"}, + "documentation":"

Gets information about the resources that are associated with the specified managed prefix list.

" + }, + "GetManagedPrefixListEntries":{ + "name":"GetManagedPrefixListEntries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetManagedPrefixListEntriesRequest"}, + "output":{"shape":"GetManagedPrefixListEntriesResult"}, + "documentation":"

Gets information about the entries for a specified managed prefix list.

" + }, "GetPasswordData":{ "name":"GetPasswordData", "http":{ @@ -3071,7 +3142,7 @@ }, "input":{"shape":"ModifyAvailabilityZoneGroupRequest"}, "output":{"shape":"ModifyAvailabilityZoneGroupResult"}, - "documentation":"

Enables or disables an Availability Zone group for your account.

Use describe-availability-zones to view the value for GroupName.

" + "documentation":"

Enables or disables an Availability Zone group for your account.

Use DescribeAvailabilityZones to view the value for GroupName.

" }, "ModifyCapacityReservation":{ "name":"ModifyCapacityReservation", @@ -3101,7 +3172,7 @@ }, "input":{"shape":"ModifyDefaultCreditSpecificationRequest"}, "output":{"shape":"ModifyDefaultCreditSpecificationResult"}, - "documentation":"

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per AWS Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at an AWS Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the default credit option for CPU usage of burstable performance instances. The default credit option is set at the account level per AWS Region, and is specified per instance family. All new burstable performance instances in the account launch using the default credit option.

ModifyDefaultCreditSpecification is an asynchronous operation, which works at an AWS Region level and modifies the credit option for each Availability Zone. All zones in a Region are updated within five minutes. But if instances are launched during this operation, they might not get the new credit option until the zone is updated. To verify whether the update has occurred, you can call GetDefaultCreditSpecification and check DefaultCreditSpecification for updates.

For more information, see Burstable performance instances in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyEbsDefaultKmsKeyId":{ "name":"ModifyEbsDefaultKmsKeyId", @@ -3177,7 +3248,7 @@ "requestUri":"/" }, "input":{"shape":"ModifyInstanceAttributeRequest"}, - "documentation":"

Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

Note: Using this action to change the security groups associated with an elastic network interface (ENI) attached to an instance in a VPC can result in an error if the instance has more than one ENI. To change the security groups associated with an ENI attached to an instance that has multiple ENIs, we recommend that you use the ModifyNetworkInterfaceAttribute action.

To modify some attributes, the instance must be stopped. For more information, see Modifying Attributes of a Stopped Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the specified attribute of the specified instance. You can specify only one attribute at a time.

Note: Using this action to change the security groups associated with an elastic network interface (ENI) attached to an instance in a VPC can result in an error if the instance has more than one ENI. To change the security groups associated with an ENI attached to an instance that has multiple ENIs, we recommend that you use the ModifyNetworkInterfaceAttribute action.

To modify some attributes, the instance must be stopped. For more information, see Modifying attributes of a stopped instance in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyInstanceCapacityReservationAttributes":{ "name":"ModifyInstanceCapacityReservationAttributes", @@ -3197,7 +3268,7 @@ }, "input":{"shape":"ModifyInstanceCreditSpecificationRequest"}, "output":{"shape":"ModifyInstanceCreditSpecificationResult"}, - "documentation":"

Modifies the credit option for CPU usage on a running or stopped burstable performance instance. The credit options are standard and unlimited.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Modifies the credit option for CPU usage on a running or stopped burstable performance instance. The credit options are standard and unlimited.

For more information, see Burstable performance instances in the Amazon Elastic Compute Cloud User Guide.

" }, "ModifyInstanceEventStartTime":{ "name":"ModifyInstanceEventStartTime", @@ -3217,7 +3288,7 @@ }, "input":{"shape":"ModifyInstanceMetadataOptionsRequest"}, "output":{"shape":"ModifyInstanceMetadataOptionsResult"}, - "documentation":"

Modify the instance metadata parameters on a running or stopped instance. When you modify the parameters on a stopped instance, they are applied when the instance is started. When you modify the parameters on a running instance, the API responds with a state of “pending”. After the parameter modifications are successfully applied to the instance, the state of the modifications changes from “pending” to “applied” in subsequent describe-instances API calls. For more information, see Instance Metadata and User Data.

" + "documentation":"

Modify the instance metadata parameters on a running or stopped instance. When you modify the parameters on a stopped instance, they are applied when the instance is started. When you modify the parameters on a running instance, the API responds with a state of “pending”. After the parameter modifications are successfully applied to the instance, the state of the modifications changes from “pending” to “applied” in subsequent describe-instances API calls. For more information, see Instance metadata and user data.

" }, "ModifyInstancePlacement":{ "name":"ModifyInstancePlacement", @@ -3239,6 +3310,16 @@ "output":{"shape":"ModifyLaunchTemplateResult"}, "documentation":"

Modifies a launch template. You can specify which version of the launch template to set as the default version. When launching an instance, the default version applies when a launch template version is not specified.

" }, + "ModifyManagedPrefixList":{ + "name":"ModifyManagedPrefixList", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyManagedPrefixListRequest"}, + "output":{"shape":"ModifyManagedPrefixListResult"}, + "documentation":"

Modifies the specified managed prefix list.

Adding or removing entries in a prefix list creates a new version of the prefix list. Changing the name of the prefix list does not affect the version.

If you specify a current version number that does not match the true current version number, the request fails.

" + }, "ModifyNetworkInterfaceAttribute":{ "name":"ModifyNetworkInterfaceAttribute", "http":{ @@ -3422,7 +3503,7 @@ }, "input":{"shape":"ModifyVpnConnectionRequest"}, "output":{"shape":"ModifyVpnConnectionResult"}, - "documentation":"

Modifies the target gateway of an AWS Site-to-Site VPN connection. The following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for a brief period while we provision the new endpoints.

" + "documentation":"

Modifies the customer gateway or the target gateway of an AWS Site-to-Site VPN connection. To modify the target gateway, the following migration options are available:

  • An existing virtual private gateway to a new virtual private gateway

  • An existing virtual private gateway to a transit gateway

  • An existing transit gateway to a new transit gateway

  • An existing transit gateway to a virtual private gateway

Before you perform the migration to the new gateway, you must configure the new gateway. Use CreateVpnGateway to create a virtual private gateway, or CreateTransitGateway to create a transit gateway.

This step is required when you migrate from a virtual private gateway with static routes to a transit gateway.

You must delete the static routes before you migrate to the new gateway.

Keep a copy of the static route before you delete it. You will need to add back these routes to the transit gateway after the VPN connection migration is complete.

After you migrate to the new gateway, you might need to modify your VPC route table. Use CreateRoute and DeleteRoute to make the changes described in VPN Gateway Target Modification Required VPC Route Table Updates in the AWS Site-to-Site VPN User Guide.

When the new gateway is a transit gateway, modify the transit gateway route table to allow traffic between the VPC and the AWS Site-to-Site VPN connection. Use CreateTransitGatewayRoute to add the routes.

If you deleted VPN static routes, you must add the static routes to the transit gateway route table.

After you perform this operation, the AWS VPN endpoint's IP addresses on the AWS side and the tunnel options remain intact. Your AWS Site-to-Site VPN connection will be temporarily unavailable for a brief period while we provision the new endpoints.

" }, "ModifyVpnTunnelCertificate":{ "name":"ModifyVpnTunnelCertificate", @@ -3452,7 +3533,7 @@ }, "input":{"shape":"MonitorInstancesRequest"}, "output":{"shape":"MonitorInstancesResult"}, - "documentation":"

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see .

" + "documentation":"

Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitoring your instances and volumes in the Amazon Elastic Compute Cloud User Guide.

To disable detailed monitoring, see .
 To disable detailed monitoring, see UnmonitorInstances.
" }, "MoveAddressToVpc":{ "name":"MoveAddressToVpc", @@ -3511,7 +3592,7 @@ "requestUri":"/" }, "input":{"shape":"RebootInstancesRequest"}, - "documentation":"

Requests a reboot of the specified instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting Console Output and Rebooting Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Requests a reboot of the specified instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored.

If an instance does not cleanly shut down within four minutes, Amazon EC2 performs a hard reboot.

For more information about troubleshooting, see Getting console output and rebooting instances in the Amazon Elastic Compute Cloud User Guide.

" }, "RegisterImage":{ "name":"RegisterImage", @@ -3523,6 +3604,16 @@ "output":{"shape":"RegisterImageResult"}, "documentation":"

Registers an AMI. When you're creating an AMI, this is the final step you must complete before you can launch an instance from the AMI. For more information about creating AMIs, see Creating Your Own AMIs in the Amazon Elastic Compute Cloud User Guide.

For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself.

You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using the block device mapping. For more information, see Launching a Linux Instance from a Backup in the Amazon Elastic Compute Cloud User Guide.

You can't register an image where a secondary (non-root) snapshot has AWS Marketplace product codes.

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to verify the subscription status for package updates. To create a new AMI for operating systems that require a billing product code, instead of registering the AMI, do the following to preserve the billing product code association:

  1. Launch an instance from an existing AMI with that billing product code.

  2. Customize the instance.

  3. Create an AMI from the instance using CreateImage.

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Obtaining Billing Information in the Amazon Elastic Compute Cloud User Guide.

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

" }, + "RegisterInstanceEventNotificationAttributes":{ + "name":"RegisterInstanceEventNotificationAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterInstanceEventNotificationAttributesRequest"}, + "output":{"shape":"RegisterInstanceEventNotificationAttributesResult"}, + "documentation":"

Registers a set of tag keys to include in scheduled event notifications for your resources.

To remove tags, use DeregisterInstanceEventNotificationAttributes.

" + }, "RegisterTransitGatewayMulticastGroupMembers":{ "name":"RegisterTransitGatewayMulticastGroupMembers", "http":{ @@ -3677,7 +3768,7 @@ }, "input":{"shape":"RequestSpotFleetRequest"}, "output":{"shape":"RequestSpotFleetResponse"}, - "documentation":"

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot Instance pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Fleet request and instances launched by the fleet. You cannot tag other resource types in a Spot Fleet request because only the spot-fleet-request and instance resource types are supported.

For more information, see Spot Fleet Requests in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Fleet request.

The Spot Fleet request specifies the total target capacity and the On-Demand target capacity. Amazon EC2 calculates the difference between the total capacity and On-Demand capacity, and launches the difference as Spot capacity.

You can submit a single request that includes multiple launch specifications that vary by instance type, AMI, Availability Zone, or subnet.

By default, the Spot Fleet requests Spot Instances in the Spot Instance pool where the price per unit is the lowest. Each launch specification can include its own instance weighting that reflects the value of the instance type to your application workload.

Alternatively, you can specify that the Spot Fleet distribute the target capacity across the Spot pools included in its launch specifications. By ensuring that the Spot Instances in your Spot Fleet are in different Spot pools, you can improve the availability of your fleet.

You can specify tags for the Spot Fleet request and instances launched by the fleet. You cannot tag other resource types in a Spot Fleet request because only the spot-fleet-request and instance resource types are supported.

For more information, see Spot Fleet requests in the Amazon EC2 User Guide for Linux Instances.

" }, "RequestSpotInstances":{ "name":"RequestSpotInstances", @@ -3687,7 +3778,7 @@ }, "input":{"shape":"RequestSpotInstancesRequest"}, "output":{"shape":"RequestSpotInstancesResult"}, - "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance Requests in the Amazon EC2 User Guide for Linux Instances.

" + "documentation":"

Creates a Spot Instance request.

For more information, see Spot Instance requests in the Amazon EC2 User Guide for Linux Instances.

" }, "ResetEbsDefaultKmsKeyId":{ "name":"ResetEbsDefaultKmsKeyId", @@ -3755,6 +3846,16 @@ "output":{"shape":"RestoreAddressToClassicResult"}, "documentation":"

Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

" }, + "RestoreManagedPrefixListVersion":{ + "name":"RestoreManagedPrefixListVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RestoreManagedPrefixListVersionRequest"}, + "output":{"shape":"RestoreManagedPrefixListVersionResult"}, + "documentation":"

Restores the entries from a previous version of a managed prefix list to a new version of the prefix list.

" + }, "RevokeClientVpnIngress":{ "name":"RevokeClientVpnIngress", "http":{ @@ -3791,7 +3892,7 @@ }, "input":{"shape":"RunInstancesRequest"}, "output":{"shape":"Reservation"}, - "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

  • [EC2-VPC] If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.

  • [EC2-Classic] If don't specify an Availability Zone, we choose one for you.

  • Some instance types must be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID, the request fails. For more information, see Instance Types Available Only in a VPC.

  • [EC2-VPC] All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.

  • Not all instance types support IPv6 addresses. For more information, see Instance Types.

  • If you don't specify a security group ID, we use the default security group. For more information, see Security Groups.

  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging Your Amazon EC2 Resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide.

For troubleshooting, see What To Do If An Instance Immediately Terminates, and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

  • [EC2-VPC] If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.

  • [EC2-Classic] If you don't specify an Availability Zone, we choose one for you.

  • Some instance types must be launched into a VPC. If you do not have a default VPC, or if you do not specify a subnet ID, the request fails. For more information, see Instance types available only in a VPC.

  • [EC2-VPC] All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.

  • Not all instance types support IPv6 addresses. For more information, see Instance types.

  • If you don't specify a security group ID, we use the default security group. For more information, see Security groups.

  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs in the Amazon Elastic Compute Cloud User Guide.

For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "RunScheduledInstances":{ "name":"RunScheduledInstances", @@ -3840,7 +3941,7 @@ "requestUri":"/" }, "input":{"shape":"SendDiagnosticInterruptRequest"}, - "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic Interrupt (Linux instances) or Send a Diagnostic Interrupt (Windows instances).

" + "documentation":"

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI).

In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace.

Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks.

For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (Linux instances) or Send a diagnostic interrupt (Windows instances).

" }, "StartInstances":{ "name":"StartInstances", @@ -3850,7 +3951,7 @@ }, "input":{"shape":"StartInstancesRequest"}, "output":{"shape":"StartInstancesResult"}, - "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping Instances in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

For more information, see Stopping instances in the Amazon Elastic Compute Cloud User Guide.

" }, "StartVpcEndpointServicePrivateDnsVerification":{ "name":"StartVpcEndpointServicePrivateDnsVerification", @@ -3870,7 +3971,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating Interrupted Spot Instances in the Amazon Elastic Compute Cloud User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting Stopping Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Stops an Amazon EBS-backed instance.

You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your Windows instance, Amazon EC2 charges you for a full instance hour. If you stop and restart your Windows instance, a new instance hour begins and Amazon EC2 charges you for another full instance hour even if you are still within the same 60-minute period when it was stopped. Every time you start your Linux instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon Elastic Compute Cloud User Guide.

When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs.

Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon Elastic Compute Cloud User Guide.

When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshooting stopping your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -3890,7 +3991,7 @@ }, "input":{"shape":"TerminateInstancesRequest"}, "output":{"shape":"TerminateInstancesResult"}, - "documentation":"

Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance Lifecycle in the Amazon Elastic Compute Cloud User Guide.

For more information about troubleshooting, see Troubleshooting Terminating Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

Terminated instances remain visible after termination (for approximately one hour).

By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance lifecycle in the Amazon Elastic Compute Cloud User Guide.

For more information about troubleshooting, see Troubleshooting terminating your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "UnassignIpv6Addresses":{ "name":"UnassignIpv6Addresses", @@ -3919,7 +4020,7 @@ }, "input":{"shape":"UnmonitorInstancesRequest"}, "output":{"shape":"UnmonitorInstancesResult"}, - "documentation":"

Disables detailed monitoring for a running instance. For more information, see Monitoring Your Instances and Volumes in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Disables detailed monitoring for a running instance. For more information, see Monitoring your instances and volumes in the Amazon Elastic Compute Cloud User Guide.

" }, "UpdateSecurityGroupRuleDescriptionsEgress":{ "name":"UpdateSecurityGroupRuleDescriptionsEgress", @@ -4187,6 +4288,27 @@ "fulfilled" ] }, + "AddPrefixListEntries":{ + "type":"list", + "member":{"shape":"AddPrefixListEntry"}, + "max":1000, + "min":0 + }, + "AddPrefixListEntry":{ + "type":"structure", + "required":["Cidr"], + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"

The CIDR block.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description for the entry.

Constraints: Up to 255 characters in length.

" + } + }, + "documentation":"

An entry for a prefix list.

" + }, "Address":{ "type":"structure", "members":{ @@ -5280,7 +5402,7 @@ }, "AccessGroupId":{ "shape":"String", - "documentation":"

The ID of the Active Directory group to grant access.

" + "documentation":"

The ID of the group to grant access to, for example, the Active Directory group or identity provider (IdP) group.

" }, "AuthorizeAllGroups":{ "shape":"Boolean", @@ -5428,7 +5550,7 @@ "members":{ "State":{ "shape":"AvailabilityZoneState", - "documentation":"

The state of the Availability Zone or Local Zone.

", + "documentation":"

The state of the Zone.

", "locationName":"zoneState" }, "OptInStatus":{ @@ -5438,7 +5560,7 @@ }, "Messages":{ "shape":"AvailabilityZoneMessageList", - "documentation":"

Any messages about the Availability Zone or Local Zone.

", + "documentation":"

Any messages about the Zone.

", "locationName":"messageSet" }, "RegionName":{ @@ -5448,12 +5570,12 @@ }, "ZoneName":{ "shape":"String", - "documentation":"

The name of the Availability Zone or Local Zone.

", + "documentation":"

The name of the Zone.

", "locationName":"zoneName" }, "ZoneId":{ "shape":"String", - "documentation":"

The ID of the Availability Zone or Local Zone.

", + "documentation":"

The ID of the Zone.

", "locationName":"zoneId" }, "GroupName":{ @@ -5465,9 +5587,24 @@ "shape":"String", "documentation":"

The name of the location from which the address is advertised.

", "locationName":"networkBorderGroup" + }, + "ZoneType":{ + "shape":"String", + "documentation":"

The type of zone. The valid values are availability-zone and local-zone.

", + "locationName":"zoneType" + }, + "ParentZoneName":{ + "shape":"String", + "documentation":"

The name of the zone that handles some of the Local Zone control plane operations, such as API calls.

", + "locationName":"parentZoneName" + }, + "ParentZoneId":{ + "shape":"String", + "documentation":"

The ID of the zone that handles some of the Local Zone control plane operations, such as API calls.

", + "locationName":"parentZoneId" } }, - "documentation":"

Describes an Availability Zone or Local Zone.

" + "documentation":"

Describes a Zone.

" }, "AvailabilityZoneList":{ "type":"list", @@ -5481,11 +5618,11 @@ "members":{ "Message":{ "shape":"String", - "documentation":"

The message about the Availability Zone or Local Zone.

", + "documentation":"

The message about the Zone.

", "locationName":"message" } }, - "documentation":"

Describes a message about an Availability Zone or Local Zone.

" + "documentation":"

Describes a message about a Zone.

" }, "AvailabilityZoneMessageList":{ "type":"list", @@ -5542,6 +5679,9 @@ } }, "BareMetalFlag":{"type":"boolean"}, + "BaselineBandwidthInMbps":{"type":"integer"}, + "BaselineIops":{"type":"integer"}, + "BaselineThroughputInMBps":{"type":"double"}, "BatchState":{ "type":"string", "enum":[ @@ -6263,7 +6403,7 @@ "members":{ "CapacityReservationPreference":{ "shape":"CapacityReservationPreference", - "documentation":"

Indicates the instance's Capacity Reservation preferences. Possible preferences include:

  • open - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).

  • none - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.

" + "documentation":"

Indicates the instance's Capacity Reservation preferences. Possible preferences include:

  • open - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).

  • none - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.

When CapacityReservationPreference is not specified, it defaults to open.

" }, "CapacityReservationTarget":{ "shape":"CapacityReservationTarget", @@ -6533,9 +6673,14 @@ "shape":"CertificateAuthentication", "documentation":"

Information about the authentication certificates, if applicable.

", "locationName":"mutualAuthentication" + }, + "FederatedAuthentication":{ + "shape":"FederatedAuthentication", + "documentation":"

Information about the IAM SAML identity provider, if applicable.

", + "locationName":"federatedAuthentication" } }, - "documentation":"

Describes the authentication methods used by a Client VPN endpoint. Client VPN supports Active Directory and mutual authentication. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" + "documentation":"

Describes the authentication methods used by a Client VPN endpoint. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" }, "ClientVpnAuthenticationList":{ "type":"list", @@ -6549,7 +6694,7 @@ "members":{ "Type":{ "shape":"ClientVpnAuthenticationType", - "documentation":"

The type of client authentication to be used. Specify certificate-authentication to use certificate-based authentication, or directory-service-authentication to use Active Directory authentication.

" + "documentation":"

The type of client authentication to be used.

" }, "ActiveDirectory":{ "shape":"DirectoryServiceAuthenticationRequest", @@ -6558,9 +6703,13 @@ "MutualAuthentication":{ "shape":"CertificateAuthenticationRequest", "documentation":"

Information about the authentication certificates to be used, if applicable. You must provide this information if Type is certificate-authentication.

" + }, + "FederatedAuthentication":{ + "shape":"FederatedAuthenticationRequest", + "documentation":"

Information about the IAM SAML identity provider to be used, if applicable. You must provide this information if Type is federated-authentication.

" } }, - "documentation":"

Describes the authentication method to be used by a Client VPN endpoint. Client VPN supports Active Directory and mutual authentication. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" + "documentation":"

Describes the authentication method to be used by a Client VPN endpoint. For more information, see Authentication in the AWS Client VPN Administrator Guide.

" }, "ClientVpnAuthenticationRequestList":{ "type":"list", @@ -6570,7 +6719,8 @@ "type":"string", "enum":[ "certificate-authentication", - "directory-service-authentication" + "directory-service-authentication", + "federated-authentication" ] }, "ClientVpnAuthorizationRuleStatus":{ @@ -7710,6 +7860,11 @@ "documentation":"

A DHCP configuration option.

", "locationName":"dhcpConfiguration" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the DHCP option.

", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -7742,6 +7897,11 @@ "VpcId":{ "shape":"VpcId", "documentation":"

The ID of the VPC for which to create the egress-only internet gateway.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the egress-only internet gateway.

", + "locationName":"TagSpecification" } } }, @@ -7960,7 +8120,7 @@ }, "LogFormat":{ "shape":"String", - "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value.

Only applicable to flow logs that are published to an Amazon S3 bucket.

" + "documentation":"

The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see Flow Log Records. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.

Specify the fields using the ${field-id} format, separated by spaces. For the AWS CLI, use single quotation marks (' ') to surround the parameter value.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -8098,7 +8258,7 @@ "members":{ "Description":{ "shape":"String", - "documentation":"

A description for the conversion task or the resource being exported. The maximum length is 255 bytes.

", + "documentation":"

A description for the conversion task or the resource being exported. The maximum length is 255 characters.

", "locationName":"description" }, "ExportToS3Task":{ @@ -8115,6 +8275,11 @@ "shape":"ExportEnvironment", "documentation":"

The target virtualization environment.

", "locationName":"targetEnvironment" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the instance export task during creation.

", + "locationName":"TagSpecification" } } }, @@ -8131,6 +8296,11 @@ "CreateInternetGatewayRequest":{ "type":"structure", "members":{ + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the internet gateway.

", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -8160,6 +8330,11 @@ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "locationName":"dryRun" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the new key pair.

", + "locationName":"TagSpecification" } } }, @@ -8204,6 +8379,11 @@ "shape":"LaunchTemplate", "documentation":"

Information about the launch template.

", "locationName":"launchTemplate" + }, + "Warning":{ + "shape":"ValidationWarning", + "documentation":"

If the launch template contains parameters or parameter combinations that are not valid, an error code and an error message are returned for each issue that's found.

", + "locationName":"warning" } } }, @@ -8248,6 +8428,11 @@ "shape":"LaunchTemplateVersion", "documentation":"

Information about the launch template version.

", "locationName":"launchTemplateVersion" + }, + "Warning":{ + "shape":"ValidationWarning", + "documentation":"

If the new version of the launch template contains parameters or parameter combinations that are not valid, an error code and an error message are returned for each issue that's found.

", + "locationName":"warning" } } }, @@ -8302,6 +8487,11 @@ "shape":"VpcId", "documentation":"

The ID of the VPC.

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the local gateway route table VPC association.

", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" @@ -8318,6 +8508,57 @@ } } }, + "CreateManagedPrefixListRequest":{ + "type":"structure", + "required":[ + "PrefixListName", + "MaxEntries", + "AddressFamily" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListName":{ + "shape":"String", + "documentation":"

A name for the prefix list.

Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws.

" + }, + "Entries":{ + "shape":"AddPrefixListEntries", + "documentation":"

One or more entries for the prefix list.

", + "locationName":"Entry" + }, + "MaxEntries":{ + "shape":"Integer", + "documentation":"

The maximum number of entries for the prefix list.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the prefix list during creation.

", + "locationName":"TagSpecification" + }, + "AddressFamily":{ + "shape":"String", + "documentation":"

The IP address type.

Valid Values: IPv4 | IPv6

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraints: Up to 255 UTF-8 characters in length.

", + "idempotencyToken":true + } + } + }, + "CreateManagedPrefixListResult":{ + "type":"structure", + "members":{ + "PrefixList":{ + "shape":"ManagedPrefixList", + "documentation":"

Information about the prefix list.

", + "locationName":"prefixList" + } + } + }, "CreateNatGatewayRequest":{ "type":"structure", "required":[ @@ -8376,7 +8617,7 @@ "members":{ "CidrBlock":{ "shape":"String", - "documentation":"

The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24).

", + "documentation":"

The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", "locationName":"cidrBlock" }, "DryRun":{ @@ -8439,6 +8680,11 @@ "shape":"VpcId", "documentation":"

The ID of the VPC.

", "locationName":"vpcId" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the network ACL.

", + "locationName":"TagSpecification" } } }, @@ -8545,6 +8791,11 @@ "shape":"SubnetId", "documentation":"

The ID of the subnet to associate with the network interface.

", "locationName":"subnetId" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the new network interface.

", + "locationName":"TagSpecification" } }, "documentation":"

Contains the parameters for CreateNetworkInterface.

" @@ -8581,6 +8832,20 @@ "PartitionCount":{ "shape":"Integer", "documentation":"

The number of partitions. Valid only when Strategy is set to partition.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the new placement group.

", + "locationName":"TagSpecification" + } + } + }, + "CreatePlacementGroupResult":{ + "type":"structure", + "members":{ + "PlacementGroup":{ + "shape":"PlacementGroup", + "locationName":"placementGroup" } } }, @@ -8633,7 +8898,7 @@ "members":{ "DestinationCidrBlock":{ "shape":"String", - "documentation":"

The IPv4 CIDR address block used for the destination match. Routing decisions are based on the most specific match.

", + "documentation":"

The IPv4 CIDR address block used for the destination match. Routing decisions are based on the most specific match. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", "locationName":"destinationCidrBlock" }, "DestinationIpv6CidrBlock":{ @@ -8641,6 +8906,10 @@ "documentation":"

The IPv6 CIDR block used for the destination match. Routing decisions are based on the most specific match.

", "locationName":"destinationIpv6CidrBlock" }, + "DestinationPrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of a prefix list used for the destination match.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -8747,6 +9016,11 @@ "shape":"VpcId", "documentation":"

[EC2-VPC] The ID of the VPC. Required for EC2-VPC.

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the security group.

", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -8761,6 +9035,11 @@ "shape":"String", "documentation":"

The ID of the security group.

", "locationName":"groupId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags assigned to the security group.

", + "locationName":"tagSet" } } }, @@ -8865,9 +9144,14 @@ "VpcId" ], "members":{ + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the subnet.

", + "locationName":"TagSpecification" + }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: AWS selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The Availability Zone or Local Zone for the subnet.

Default: AWS selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet.

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute Cloud User Guide.

To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN.

" }, "AvailabilityZoneId":{ "shape":"String", @@ -8875,7 +9159,7 @@ }, "CidrBlock":{ "shape":"String", - "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.

" + "documentation":"

The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" }, "Ipv6CidrBlock":{ "shape":"String", @@ -8883,7 +9167,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Outpost.

" + "documentation":"

The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost ARN, you must also specify the Availability Zone of the Outpost subnet.

" }, "VpcId":{ "shape":"VpcId", @@ -9695,7 +9979,7 @@ "members":{ "CidrBlock":{ "shape":"String", - "documentation":"

The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16.

" + "documentation":"

The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" }, "AmazonProvidedIpv6CidrBlock":{ "shape":"Boolean", @@ -9723,6 +10007,11 @@ "Ipv6CidrBlockNetworkBorderGroup":{ "shape":"String", "documentation":"

The name of the location from which we advertise the IPV6 CIDR block. Use this parameter to limit the address to this location.

You must set AmazonProvidedIpv6CidrBlock to true to use this parameter.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to assign to the VPC.

", + "locationName":"TagSpecification" } } }, @@ -10260,12 +10549,15 @@ }, "DeleteKeyPairRequest":{ "type":"structure", - "required":["KeyName"], "members":{ "KeyName":{ "shape":"KeyPairName", "documentation":"

The name of the key pair.

" }, + "KeyPairId":{ + "shape":"KeyPairId", + "documentation":"

The ID of the key pair.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -10454,6 +10746,30 @@ } } }, + "DeleteManagedPrefixListRequest":{ + "type":"structure", + "required":["PrefixListId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

" + } + } + }, + "DeleteManagedPrefixListResult":{ + "type":"structure", + "members":{ + "PrefixList":{ + "shape":"ManagedPrefixList", + "documentation":"

Information about the prefix list.

", + "locationName":"prefixList" + } + } + }, "DeleteNatGatewayRequest":{ "type":"structure", "required":["NatGatewayId"], @@ -10664,6 +10980,10 @@ "documentation":"

The IPv6 CIDR range for the route. The value you specify must match the CIDR for the route exactly.

", "locationName":"destinationIpv6CidrBlock" }, + "DestinationPrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list for the route.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -11242,6 +11562,44 @@ }, "documentation":"

Contains the parameters for DeregisterImage.

" }, + "DeregisterInstanceEventNotificationAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "InstanceTagAttribute":{ + "shape":"DeregisterInstanceTagAttributeRequest", + "documentation":"

Information about the tag keys to deregister.

" + } + } + }, + "DeregisterInstanceEventNotificationAttributesResult":{ + "type":"structure", + "members":{ + "InstanceTagAttribute":{ + "shape":"InstanceTagNotificationAttribute", + "documentation":"

The resulting set of tag keys.

", + "locationName":"instanceTagAttribute" + } + } + }, + "DeregisterInstanceTagAttributeRequest":{ + "type":"structure", + "members":{ + "IncludeAllTagsOfInstance":{ + "shape":"Boolean", + "documentation":"

Indicates whether to deregister all tag keys in the current Region. Specify true to deregister all tag keys.

" + }, + "InstanceTagKeys":{ + "shape":"InstanceTagKeySet", + "documentation":"

Information about the tag keys to deregister.

", + "locationName":"InstanceTagKey" + } + }, + "documentation":"

Information about the tag keys to deregister for the current Region. You can either specify individual tag keys or deregister all tag keys in the current Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys in the request.

" + }, "DeregisterTransitGatewayMulticastGroupMembersRequest":{ "type":"structure", "members":{ @@ -11393,17 +11751,17 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • group-name - For Availability Zones, use the Region name. For Local Zones, use the name of the group associated with the Local Zone (for example, us-west-2-lax-1).

  • message - The Availability Zone or Local Zone message.

  • opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required).

  • region-name - The name of the Region for the Availability Zone or Local Zone (for example, us-east-1).

  • state - The state of the Availability Zone or Local Zone (available | information | impaired | unavailable).

  • zone-id - The ID of the Availability Zone (for example, use1-az1) or the Local Zone (for example, use usw2-lax1-az1).

  • zone-name - The name of the Availability Zone (for example, us-east-1a) or the Local Zone (for example, use us-west-2-lax-1a).

", + "documentation":"

The filters.

  • group-name - For Availability Zones, use the Region name. For Local Zones, use the name of the group associated with the Local Zone (for example, us-west-2-lax-1).

  • message - The Zone message.

  • opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required).

  • region-name - The name of the Region for the Zone (for example, us-east-1).

  • state - The state of the Availability Zone or Local Zone (available | information | impaired | unavailable).

  • zone-id - The ID of the Availability Zone (for example, use1-az1) or the Local Zone (for example, use usw2-lax1-az1).

  • zone-name - The name of the Availability Zone (for example, us-east-1a) or the Local Zone (for example, use us-west-2-lax-1a).

", "locationName":"Filter" }, "ZoneNames":{ "shape":"ZoneNameStringList", - "documentation":"

The names of the Availability Zones and Local Zones.

", + "documentation":"

The names of the Zones.

", "locationName":"ZoneName" }, "ZoneIds":{ "shape":"ZoneIdStringList", - "documentation":"

The IDs of the Availability Zones and Local Zones.

", + "documentation":"

The IDs of the Zones.

", "locationName":"ZoneId" }, "AllAvailabilityZones":{ @@ -11422,7 +11780,7 @@ "members":{ "AvailabilityZones":{ "shape":"AvailabilityZoneList", - "documentation":"

Information about the Availability Zones and Local Zones.

", + "documentation":"

Information about the Zones.

", "locationName":"availabilityZoneInfo" } } @@ -12181,12 +12539,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -12234,7 +12592,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters. The possible values are:

  • availability-zone: The Availability Zone of the snapshot.

  • owner-id: The ID of the AWS account that owns the snapshot.

  • snapshot-id: The ID of the snapshot.

  • state: The state of fast snapshot restores for the snapshot (enabling | optimizing | enabled | disabling | disabled).

", + "documentation":"

The filters. The possible values are:

  • availability-zone: The Availability Zone of the snapshot.

  • owner-id: The ID of the AWS account that enabled fast snapshot restore on the snapshot.

  • snapshot-id: The ID of the snapshot.

  • state: The state of fast snapshot restores for the snapshot (enabling | optimizing | enabled | disabling | disabled).

", "locationName":"Filter" }, "MaxResults":{ @@ -12753,7 +13111,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • instance-id - The ID of the instance.

  • state - The state of the association (associating | associated | disassociating | disassociated).

", + "documentation":"

The filters.

  • instance-id - The ID of the instance.

  • state - The state of the association (associating | associated | disassociating).

", "locationName":"Filter" }, "MaxResults":{ @@ -13042,6 +13400,25 @@ } } }, + "DescribeInstanceEventNotificationAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "DescribeInstanceEventNotificationAttributesResult":{ + "type":"structure", + "members":{ + "InstanceTagAttribute":{ + "shape":"InstanceTagNotificationAttribute", + "documentation":"

Information about the registered tag keys.

", + "locationName":"instanceTagAttribute" + } + } + }, "DescribeInstanceStatusRequest":{ "type":"structure", "members":{ @@ -13145,7 +13522,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported. (true | false)

  • bare-metal - Indicates whether it is a bare metal instance type. (true | false)

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type. (true | false)

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family. (true | false)

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized. (supported | unsupported | default)

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported. (supported | unsupported)

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier. (true | false)

  • hibernation-supported - Indicates whether On-Demand hibernation is supported. (true | false)

  • hypervisor - The hypervisor used. (nitro | xen)

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks. (hdd | ssd)

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage. (true | false)

  • memory-info.size-in-mib - The memory size.

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required. (required | supported | unsupported)

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6. (true | false)

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - Describes the network performance.

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

  • auto-recovery-supported - Indicates whether auto recovery is supported. (true | false)

  • bare-metal - Indicates whether it is a bare metal instance type. (true | false)

  • burstable-performance-supported - Indicates whether it is a burstable performance instance type. (true | false)

  • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family. (true | false)

  • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MBps.

  • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

  • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MBps.

  • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

  • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized. (supported | unsupported | default)

  • ebs-info.encryption-support - Indicates whether EBS encryption is supported. (supported | unsupported)

  • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported or required. (required | supported | unsupported)

  • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier. (true | false)

  • hibernation-supported - Indicates whether On-Demand hibernation is supported. (true | false)

  • hypervisor - The hypervisor used. (nitro | xen)

  • instance-storage-info.disk.count - The number of local disks.

  • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

  • instance-storage-info.disk.type - The storage technology for the local instance storage disks. (hdd | ssd)

  • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

  • instance-storage-supported - Indicates whether the instance type has local instance storage. (true | false)

  • memory-info.size-in-mib - The memory size.

  • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required. (required | supported | unsupported)

  • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA). (true | false)

  • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

  • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

  • network-info.ipv6-supported - Indicates whether the instance type supports IPv6. (true | false)

  • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

  • network-info.network-performance - Describes the network performance.

  • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

  • vcpu-info.default-cores - The default number of cores for the instance type.

  • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

  • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

", "locationName":"Filter" }, "MaxResults":{ @@ -13313,7 +13690,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • fingerprint - The fingerprint of the key pair.

  • key-name - The name of the key pair.

", + "documentation":"

The filters.

  • key-pair-id - The ID of the key pair.

  • fingerprint - The fingerprint of the key pair.

  • key-name - The name of the key pair.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

", "locationName":"Filter" }, "KeyNames":{ @@ -13463,7 +13840,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

  • local-gateway-id - The ID of a local gateway.

  • local-gateway-route-table-id - The ID of the local gateway route table.

  • local-gateway-route-table-virtual-interface-group-association-id - The ID of the association.

  • local-gateway-route-table-virtual-interface-group-id - The ID of the virtual interface group.

  • state - The state of the association.

", "locationName":"Filter" }, "MaxResults":{ @@ -13505,7 +13882,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

  • local-gateway-id - The ID of a local gateway.

  • local-gateway-route-table-id - The ID of the local gateway route table.

  • local-gateway-route-table-vpc-association-id - The ID of the association.

  • state - The state of the association.

  • vpc-id - The ID of the VPC.

", "locationName":"Filter" }, "MaxResults":{ @@ -13547,7 +13924,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

  • local-gateway-id - The ID of a local gateway.

  • local-gateway-route-table-id - The ID of a local gateway route table.

  • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

  • state - The state of the local gateway route table.

", "locationName":"Filter" }, "MaxResults":{ @@ -13589,7 +13966,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

  • local-gateway-id - The ID of a local gateway.

  • local-gateway-virtual-interface-id - The ID of the virtual interface.

  • local-gateway-virtual-interface-group-id - The ID of the virtual interface group.

", "locationName":"Filter" }, "MaxResults":{ @@ -13668,7 +14045,7 @@ "members":{ "LocalGatewayIds":{ "shape":"LocalGatewayIdSet", - "documentation":"

The IDs of the local gateways.

", + "documentation":"

The IDs of the local gateways.

", "locationName":"LocalGatewayId" }, "Filters":{ @@ -13705,6 +14082,48 @@ } } }, + "DescribeManagedPrefixListsRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

One or more filters.

  • owner-id - The ID of the prefix list owner.

  • prefix-list-id - The ID of the prefix list.

  • prefix-list-name - The name of the prefix list.

", + "locationName":"Filter" + }, + "MaxResults":{ + "shape":"PrefixListMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + }, + "PrefixListIds":{ + "shape":"ValueStringList", + "documentation":"

One or more prefix list IDs.

", + "locationName":"PrefixListId" + } + } + }, + "DescribeManagedPrefixListsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + }, + "PrefixLists":{ + "shape":"ManagedPrefixListSet", + "documentation":"

Information about the prefix lists.

", + "locationName":"prefixListSet" + } + } + }, "DescribeMovingAddressesMaxResults":{ "type":"integer", "max":1000, @@ -13957,7 +14376,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.nat-gateway-id - The ID of the NAT gateway to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The AWS account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", + "documentation":"

One or more filters.

  • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

  • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

  • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

  • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

  • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

  • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

  • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

  • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

  • association.public-dns-name - The public DNS name for the network interface (IPv4).

  • attachment.attachment-id - The ID of the interface attachment.

  • attachment.attach-time - The time that the network interface was attached to an instance.

  • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

  • attachment.device-index - The device index to which the network interface is attached.

  • attachment.instance-id - The ID of the instance to which the network interface is attached.

  • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

  • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

  • availability-zone - The Availability Zone of the network interface.

  • description - The description of the network interface.

  • group-id - The ID of a security group associated with the network interface.

  • group-name - The name of a security group associated with the network interface.

  • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

  • mac-address - The MAC address of the network interface.

  • network-interface-id - The ID of the network interface.

  • owner-id - The AWS account ID of the network interface owner.

  • private-ip-address - The private IPv4 address or addresses of the network interface.

  • private-dns-name - The private DNS name of the network interface (IPv4).

  • requester-id - The ID of the entity that launched the instance on your behalf (for example, AWS Management Console, Auto Scaling, and so on).

  • requester-managed - Indicates whether the network interface is being managed by an AWS service (for example, AWS Management Console, Auto Scaling, and so on).

  • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

  • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

  • subnet-id - The ID of the subnet for the network interface.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC for the network interface.

", "locationName":"filter" }, "DryRun":{ @@ -14002,7 +14421,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • group-name - The name of the placement group.

  • state - The state of the placement group (pending | available | deleting | deleted).

  • strategy - The strategy of the placement group (cluster | spread | partition).

", + "documentation":"

The filters.

  • group-name - The name of the placement group.

  • state - The state of the placement group (pending | available | deleting | deleted).

  • strategy - The strategy of the placement group (cluster | spread | partition).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

", "locationName":"Filter" }, "DryRun":{ @@ -14582,7 +15001,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

  • description - The description of the security group.

  • egress.ip-permission.cidr - An IPv4 CIDR block for an outbound security group rule.

  • egress.ip-permission.from-port - For an outbound rule, the start of port range for the TCP and UDP protocols, or an ICMP type number.

  • egress.ip-permission.group-id - The ID of a security group that has been referenced in an outbound security group rule.

  • egress.ip-permission.group-name - The name of a security group that has been referenced in an outbound security group rule.

  • egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound security group rule.

  • egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which a security group rule allows outbound access.

  • egress.ip-permission.protocol - The IP protocol for an outbound security group rule (tcp | udp | icmp or a protocol number).

  • egress.ip-permission.to-port - For an outbound rule, the end of port range for the TCP and UDP protocols, or an ICMP code.

  • egress.ip-permission.user-id - The ID of an AWS account that has been referenced in an outbound security group rule.

  • group-id - The ID of the security group.

  • group-name - The name of the security group.

  • ip-permission.cidr - An IPv4 CIDR block for an inbound security group rule.

  • ip-permission.from-port - For an inbound rule, the start of port range for the TCP and UDP protocols, or an ICMP type number.

  • ip-permission.group-id - The ID of a security group that has been referenced in an inbound security group rule.

  • ip-permission.group-name - The name of a security group that has been referenced in an inbound security group rule.

  • ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security group rule.

  • ip-permission.prefix-list-id - The ID (prefix) of the AWS service from which a security group rule allows inbound access.

  • ip-permission.protocol - The IP protocol for an inbound security group rule (tcp | udp | icmp or a protocol number).

  • ip-permission.to-port - For an inbound rule, the end of port range for the TCP and UDP protocols, or an ICMP code.

  • ip-permission.user-id - The ID of an AWS account that has been referenced in an inbound security group rule.

  • owner-id - The AWS account ID of the owner of the security group.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC specified when the security group was created.

", + "documentation":"

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

  • description - The description of the security group.

  • egress.ip-permission.cidr - An IPv4 CIDR block for an outbound security group rule.

  • egress.ip-permission.from-port - For an outbound rule, the start of port range for the TCP and UDP protocols, or an ICMP type number.

  • egress.ip-permission.group-id - The ID of a security group that has been referenced in an outbound security group rule.

  • egress.ip-permission.group-name - The name of a security group that has been referenced in an outbound security group rule.

  • egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound security group rule.

  • egress.ip-permission.prefix-list-id - The ID of a prefix list to which a security group rule allows outbound access.

  • egress.ip-permission.protocol - The IP protocol for an outbound security group rule (tcp | udp | icmp or a protocol number).

  • egress.ip-permission.to-port - For an outbound rule, the end of port range for the TCP and UDP protocols, or an ICMP code.

  • egress.ip-permission.user-id - The ID of an AWS account that has been referenced in an outbound security group rule.

  • group-id - The ID of the security group.

  • group-name - The name of the security group.

  • ip-permission.cidr - An IPv4 CIDR block for an inbound security group rule.

  • ip-permission.from-port - For an inbound rule, the start of port range for the TCP and UDP protocols, or an ICMP type number.

  • ip-permission.group-id - The ID of a security group that has been referenced in an inbound security group rule.

  • ip-permission.group-name - The name of a security group that has been referenced in an inbound security group rule.

  • ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security group rule.

  • ip-permission.prefix-list-id - The ID of a prefix list from which a security group rule allows inbound access.

  • ip-permission.protocol - The IP protocol for an inbound security group rule (tcp | udp | icmp or a protocol number).

  • ip-permission.to-port - For an inbound rule, the end of port range for the TCP and UDP protocols, or an ICMP code.

  • ip-permission.user-id - The ID of an AWS account that has been referenced in an inbound security group rule.

  • owner-id - The AWS account ID of the owner of the security group.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • vpc-id - The ID of the VPC specified when the security group was created.

", "locationName":"Filter" }, "GroupIds":{ @@ -14672,7 +15091,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • description - A description of the snapshot.

  • encrypted - Indicates whether the snapshot is encrypted (true | false)

  • owner-alias - Value from an Amazon-maintained list (amazon | self | all | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

  • owner-id - The ID of the AWS account that owns the snapshot.

  • progress - The progress of the snapshot, as a percentage (for example, 80%).

  • snapshot-id - The snapshot ID.

  • start-time - The time stamp when the snapshot was initiated.

  • status - The status of the snapshot (pending | completed | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The ID of the volume the snapshot is for.

  • volume-size - The size of the volume, in GiB.

", + "documentation":"

The filters.

  • description - A description of the snapshot.

  • encrypted - Indicates whether the snapshot is encrypted (true | false)

  • owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured AWS account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.

  • owner-id - The AWS account ID of the owner. We recommend that you use the related parameter instead of this filter.

  • progress - The progress of the snapshot, as a percentage (for example, 80%).

  • snapshot-id - The snapshot ID.

  • start-time - The time stamp when the snapshot was initiated.

  • status - The status of the snapshot (pending | completed | error).

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • volume-id - The ID of the volume the snapshot is for.

  • volume-size - The size of the volume, in GiB.

", "locationName":"Filter" }, "MaxResults":{ @@ -14685,7 +15104,7 @@ }, "OwnerIds":{ "shape":"OwnerStringList", - "documentation":"

Describes the snapshots owned by these owners.

", + "documentation":"

Scopes the results to snapshots with the specified owners. You can specify a combination of AWS account IDs, self, and amazon.

", "locationName":"Owner" }, "RestorableByUserIds":{ @@ -14918,7 +15337,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

  • availability-zone-group - The Availability Zone group.

  • create-time - The time stamp when the Spot Instance request was created.

  • fault-code - The fault code related to the request.

  • fault-message - The fault message related to the request.

  • instance-id - The ID of the instance that fulfilled the request.

  • launch-group - The Spot Instance launch group.

  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.

  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).

  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.

  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.

  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic.

  • launch.group-id - The ID of the security group for the instance.

  • launch.group-name - The name of the security group for the instance.

  • launch.image-id - The ID of the AMI.

  • launch.instance-type - The type of instance (for example, m3.medium).

  • launch.kernel-id - The kernel ID.

  • launch.key-name - The name of the key pair the instance launched with.

  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.

  • launch.ramdisk-id - The RAM disk ID.

  • launched-availability-zone - The Availability Zone in which the request is launched.

  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

  • network-interface.description - A description of the network interface.

  • network-interface.device-index - The index of the device for the network interface attachment on the instance.

  • network-interface.group-id - The ID of the security group associated with the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.private-ip-address - The primary private IP address of the network interface.

  • network-interface.subnet-id - The ID of the subnet for the instance.

  • product-description - The product description associated with the instance (Linux/UNIX | Windows).

  • spot-instance-request-id - The Spot Instance request ID.

  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.

  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot Request Status in the Amazon EC2 User Guide for Linux Instances.

  • status-code - The short code describing the most recent evaluation of your Spot Instance request.

  • status-message - The message explaining the status of the Spot Instance request.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • type - The type of Spot Instance request (one-time | persistent).

  • valid-from - The start date of the request.

  • valid-until - The end date of the request.

", + "documentation":"

One or more filters.

  • availability-zone-group - The Availability Zone group.

  • create-time - The time stamp when the Spot Instance request was created.

  • fault-code - The fault code related to the request.

  • fault-message - The fault message related to the request.

  • instance-id - The ID of the instance that fulfilled the request.

  • launch-group - The Spot Instance launch group.

  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.

  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).

  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.

  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.

  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.

  • launch.group-id - The ID of the security group for the instance.

  • launch.group-name - The name of the security group for the instance.

  • launch.image-id - The ID of the AMI.

  • launch.instance-type - The type of instance (for example, m3.medium).

  • launch.kernel-id - The kernel ID.

  • launch.key-name - The name of the key pair the instance launched with.

  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.

  • launch.ramdisk-id - The RAM disk ID.

  • launched-availability-zone - The Availability Zone in which the request is launched.

  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.

  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.

  • network-interface.description - A description of the network interface.

  • network-interface.device-index - The index of the device for the network interface attachment on the instance.

  • network-interface.group-id - The ID of the security group associated with the network interface.

  • network-interface.network-interface-id - The ID of the network interface.

  • network-interface.private-ip-address - The primary private IP address of the network interface.

  • network-interface.subnet-id - The ID of the subnet for the instance.

  • product-description - The product description associated with the instance (Linux/UNIX | Windows).

  • spot-instance-request-id - The Spot Instance request ID.

  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.

  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide for Linux Instances.

  • status-code - The short code describing the most recent evaluation of your Spot Instance request.

  • status-message - The message explaining the status of the Spot Instance request.

  • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

  • type - The type of Spot Instance request (one-time | persistent).

  • valid-from - The start date of the request.

  • valid-until - The end date of the request.

", "locationName":"Filter" }, "DryRun":{ @@ -15130,7 +15549,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

  • key - The tag key.

  • resource-id - The ID of the resource.

  • resource-type - The resource type (customer-gateway | dedicated-host | dhcp-options | elastic-ip | fleet | fpga-image | image | instance | host-reservation | internet-gateway | launch-template | natgateway | network-acl | network-interface | placement-group | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection | vpn-connection | vpn-gateway).

  • tag:<key> - The key/value combination of the tag. For example, specify \"tag:Owner\" for the filter name and \"TeamA\" for the filter value to find resources with the tag \"Owner=TeamA\".

  • value - The tag value.

", + "documentation":"

The filters.

  • key - The tag key.

  • resource-id - The ID of the resource.

  • resource-type - The resource type (customer-gateway | dedicated-host | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | image | instance | internet-gateway | key-pair | launch-template | natgateway | network-acl | network-interface | placement-group | reserved-instances | route-table | security-group | snapshot | spot-instances-request | subnet | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection | vpn-connection | vpn-gateway).

  • tag:<key> - The key/value combination of the tag. For example, specify \"tag:Owner\" for the filter name and \"TeamA\" for the filter value to find resources with the tag \"Owner=TeamA\".

  • value - The tag value.

", "locationName":"Filter" }, "MaxResults":{ @@ -15626,12 +16045,12 @@ }, "VolumeIds":{ "shape":"VolumeIdStringList", - "documentation":"

The IDs of the volumes for which in-progress modifications will be described.

", + "documentation":"

The IDs of the volumes.

", "locationName":"VolumeId" }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters. Supported filters: volume-id | modification-state | target-size | target-iops | target-volume-type | original-size | original-iops | original-volume-type | start-time | originalMultiAttachEnabled | targetMultiAttachEnabled.

", + "documentation":"

The filters.

  • modification-state - The current modification state (modifying | optimizing | completed | failed).

  • original-iops - The original IOPS rate of the volume.

  • original-size - The original size of the volume, in GiB.

  • original-volume-type - The original volume type of the volume (standard | io1 | gp2 | sc1 | st1).

  • originalMultiAttachEnabled - Indicates whether Multi-Attach support was enabled (true | false).

  • start-time - The modification start time.

  • target-iops - The target IOPS rate of the volume.

  • target-size - The target size of the volume, in GiB.

  • target-volume-type - The target volume type of the volume (standard | io1 | gp2 | sc1 | st1).

  • targetMultiAttachEnabled - Indicates whether Multi-Attach support is to be enabled (true | false).

  • volume-id - The ID of the volume.

", "locationName":"Filter" }, "NextToken":{ @@ -16570,12 +16989,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -16829,7 +17248,7 @@ "members":{ "AssociationId":{ "shape":"RouteTableAssociationId", - "documentation":"

The association ID representing the current association between the route table and subnet.

", + "documentation":"

The association ID representing the current association between the route table and subnet or gateway.

", "locationName":"associationId" }, "DryRun":{ @@ -17158,12 +17577,12 @@ "members":{ "DeleteOnTermination":{ "shape":"Boolean", - "documentation":"

Indicates whether the EBS volume is deleted on instance termination. For more information, see Preserving Amazon EBS Volumes on Instance Termination in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Indicates whether the EBS volume is deleted on instance termination. For more information, see Preserving Amazon EBS volumes on instance termination in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"deleteOnTermination" }, "Iops":{ "shape":"Integer", - "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", + "documentation":"

The number of I/O operations per second (IOPS) that the volume supports. For io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information, see Amazon EBS volume types in the Amazon Elastic Compute Cloud User Guide.

Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed only on Nitro-based instances. Other instance families guarantee performance up to 32,000 IOPS. For more information, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide.

Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.

", "locationName":"iops" }, "SnapshotId":{ @@ -17187,7 +17606,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.

This parameter is not returned by .

", + "documentation":"

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

This parameter is not returned by .

", "locationName":"encrypted" } }, @@ -17212,6 +17631,16 @@ "shape":"EbsEncryptionSupport", "documentation":"

Indicates whether Amazon EBS encryption is supported.

", "locationName":"encryptionSupport" + }, + "EbsOptimizedInfo":{ + "shape":"EbsOptimizedInfo", + "documentation":"

Describes the optimized EBS performance for the instance type.

", + "locationName":"ebsOptimizedInfo" + }, + "NvmeSupport":{ + "shape":"EbsNvmeSupport", + "documentation":"

Indicates whether non-volatile memory express (NVMe) is supported.

", + "locationName":"nvmeSupport" } }, "documentation":"

Describes the Amazon EBS features supported by the instance type.

" @@ -17258,6 +17687,50 @@ }, "documentation":"

Describes information used to set up an EBS volume specified in a block device mapping.

" }, + "EbsNvmeSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported", + "required" + ] + }, + "EbsOptimizedInfo":{ + "type":"structure", + "members":{ + "BaselineBandwidthInMbps":{ + "shape":"BaselineBandwidthInMbps", + "documentation":"

The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

", + "locationName":"baselineBandwidthInMbps" + }, + "BaselineThroughputInMBps":{ + "shape":"BaselineThroughputInMBps", + "documentation":"

The baseline throughput performance for an EBS-optimized instance type, in MBps.

", + "locationName":"baselineThroughputInMBps" + }, + "BaselineIops":{ + "shape":"BaselineIops", + "documentation":"

The baseline input/output storage operations per second for an EBS-optimized instance type.

", + "locationName":"baselineIops" + }, + "MaximumBandwidthInMbps":{ + "shape":"MaximumBandwidthInMbps", + "documentation":"

The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

", + "locationName":"maximumBandwidthInMbps" + }, + "MaximumThroughputInMBps":{ + "shape":"MaximumThroughputInMBps", + "documentation":"

The maximum throughput performance for an EBS-optimized instance type, in MBps.

", + "locationName":"maximumThroughputInMBps" + }, + "MaximumIops":{ + "shape":"MaximumIops", + "documentation":"

The maximum input/output storage operations per second for an EBS-optimized instance type.

", + "locationName":"maximumIops" + } + }, + "documentation":"

Describes the optimized EBS performance for supported instance types.

" + }, "EbsOptimizedSupport":{ "type":"string", "enum":[ @@ -17266,6 +17739,7 @@ "default" ] }, + "EfaSupportedFlag":{"type":"boolean"}, "EgressOnlyInternetGateway":{ "type":"structure", "members":{ @@ -17462,7 +17936,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, and eia1.xlarge.

" + "documentation":"

The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge.

" }, "Count":{ "shape":"ElasticInferenceAcceleratorCount", @@ -17630,12 +18104,12 @@ }, "OwnerId":{ "shape":"String", - "documentation":"

The ID of the AWS account that owns the snapshot.

", + "documentation":"

The ID of the AWS account that enabled fast snapshot restores on the snapshot.

", "locationName":"ownerId" }, "OwnerAlias":{ "shape":"String", - "documentation":"

The alias of the snapshot owner.

", + "documentation":"

The AWS owner alias that enabled fast snapshot restores on the snapshot. This is intended for future use.

", "locationName":"ownerAlias" }, "EnablingTime":{ @@ -17839,6 +18313,13 @@ "locationName":"item" } }, + "ErrorSet":{ + "type":"list", + "member":{ + "shape":"ValidationError", + "locationName":"item" + } + }, "EventCode":{ "type":"string", "enum":[ @@ -17969,7 +18450,7 @@ }, "Description":{ "shape":"String", - "documentation":"

A description of the image being exported. The maximum length is 255 bytes.

" + "documentation":"

A description of the image being exported. The maximum length is 255 characters.

" }, "DiskImageFormat":{ "shape":"DiskImageFormat", @@ -17985,11 +18466,16 @@ }, "S3ExportLocation":{ "shape":"ExportTaskS3LocationRequest", - "documentation":"

Information about the destination S3 bucket. The bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

" + "documentation":"

Information about the destination Amazon S3 bucket. The bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

" }, "RoleName":{ "shape":"String", - "documentation":"

The name of the role that grants VM Import/Export permission to export images to your S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.

" + "documentation":"

The name of the role that grants VM Import/Export permission to export images to your Amazon S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the image being exported.

", + "locationName":"TagSpecification" } } }, @@ -18018,7 +18504,7 @@ }, "RoleName":{ "shape":"String", - "documentation":"

The name of the role that grants VM Import/Export permission to export images to your S3 bucket.

", + "documentation":"

The name of the role that grants VM Import/Export permission to export images to your Amazon S3 bucket.

", "locationName":"roleName" }, "Progress":{ @@ -18028,7 +18514,7 @@ }, "S3ExportLocation":{ "shape":"ExportTaskS3Location", - "documentation":"

Information about the destination S3 bucket.

", + "documentation":"

Information about the destination Amazon S3 bucket.

", "locationName":"s3ExportLocation" }, "Status":{ @@ -18040,6 +18526,11 @@ "shape":"String", "documentation":"

The status message for the export image task.

", "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the image being exported.

", + "locationName":"tagSet" } } }, @@ -18068,7 +18559,7 @@ }, "S3ExportLocation":{ "shape":"ExportTaskS3Location", - "documentation":"

Information about the destination S3 bucket.

", + "documentation":"

Information about the destination Amazon S3 bucket.

", "locationName":"s3ExportLocation" }, "Status":{ @@ -18080,6 +18571,11 @@ "shape":"String", "documentation":"

The status message for the export image task.

", "locationName":"statusMessage" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the image being exported.

", + "locationName":"tagSet" } }, "documentation":"

Describes an export image task.

" @@ -18160,7 +18656,7 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

The destination S3 bucket.

", + "documentation":"

The destination Amazon S3 bucket.

", "locationName":"s3Bucket" }, "S3Prefix":{ @@ -18177,7 +18673,7 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

The destination S3 bucket.

" + "documentation":"

The destination Amazon S3 bucket.

" }, "S3Prefix":{ "shape":"String", @@ -18210,7 +18706,7 @@ }, "S3Bucket":{ "shape":"String", - "documentation":"

The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

", + "documentation":"

The Amazon S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

", "locationName":"s3Bucket" }, "S3Key":{ @@ -18236,12 +18732,12 @@ }, "S3Bucket":{ "shape":"String", - "documentation":"

The S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

", + "documentation":"

The Amazon S3 bucket for the destination image. The destination bucket must exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com.

", "locationName":"s3Bucket" }, "S3Prefix":{ "shape":"String", - "documentation":"

The image is written to a single object in the S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

", + "documentation":"

The image is written to a single object in the Amazon S3 bucket at the S3 key s3prefix + exportTaskId + '.' + diskImageFormat.

", "locationName":"s3Prefix" } }, @@ -18317,6 +18813,27 @@ "disabled" ] }, + "FederatedAuthentication":{ + "type":"structure", + "members":{ + "SamlProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider.

", + "locationName":"samlProviderArn" + } + }, + "documentation":"

Describes the IAM SAML identity provider used for federated authentication.

" + }, + "FederatedAuthenticationRequest":{ + "type":"structure", + "members":{ + "SAMLProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM SAML identity provider.

" + } + }, + "documentation":"

The IAM SAML identity provider used for federated authentication.

" + }, "Filter":{ "type":"structure", "members":{ @@ -18618,39 +19135,39 @@ "members":{ "LaunchTemplateId":{ "shape":"String", - "documentation":"

The ID of the launch template. You must specify either a template ID or a template name.

", + "documentation":"

The ID of the launch template. If you specify the template ID, you can't specify the template name.

", "locationName":"launchTemplateId" }, "LaunchTemplateName":{ "shape":"LaunchTemplateName", - "documentation":"

The name of the launch template. You must specify either a template name or a template ID.

", + "documentation":"

The name of the launch template. If you specify the template name, you can't specify the template ID.

", "locationName":"launchTemplateName" }, "Version":{ "shape":"String", - "documentation":"

The version number of the launch template. You must specify a version number.

", + "documentation":"

The launch template version number, $Latest, or $Default. You must specify a value, otherwise the request fails.

If the value is $Latest, Amazon EC2 uses the latest version of the launch template.

If the value is $Default, Amazon EC2 uses the default version of the launch template.

", "locationName":"version" } }, - "documentation":"

Describes a launch template.

" + "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by a Spot Fleet request to configure Amazon EC2 instances. For information about launch templates, see Launching an instance from a launch template in the Amazon EC2 User Guide for Linux Instances.

" }, "FleetLaunchTemplateSpecificationRequest":{ "type":"structure", "members":{ "LaunchTemplateId":{ "shape":"LaunchTemplateId", - "documentation":"

The ID of the launch template.

" + "documentation":"

The ID of the launch template. If you specify the template ID, you can't specify the template name.

" }, "LaunchTemplateName":{ "shape":"LaunchTemplateName", - "documentation":"

The name of the launch template.

" + "documentation":"

The name of the launch template. If you specify the template name, you can't specify the template ID.

" }, "Version":{ "shape":"String", - "documentation":"

The version number of the launch template. Note: This is a required parameter and will be updated soon.

" + "documentation":"

The launch template version number, $Latest, or $Default. You must specify a value, otherwise the request fails.

If the value is $Latest, Amazon EC2 uses the latest version of the launch template.

If the value is $Default, Amazon EC2 uses the default version of the launch template.

" } }, - "documentation":"

The launch template to use. You must specify either the launch template ID or launch template name in the request.

" + "documentation":"

Describes the Amazon EC2 launch template and the launch template version that can be used by an EC2 Fleet to configure Amazon EC2 instances. For information about launch templates, see Launching an instance from a launch template in the Amazon Elastic Compute Cloud User Guide.

" }, "FleetOnDemandAllocationStrategy":{ "type":"string", @@ -19374,6 +19891,89 @@ } } }, + "GetManagedPrefixListAssociationsMaxResults":{ + "type":"integer", + "max":255, + "min":5 + }, + "GetManagedPrefixListAssociationsRequest":{ + "type":"structure", + "required":["PrefixListId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

" + }, + "MaxResults":{ + "shape":"GetManagedPrefixListAssociationsMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + } + } + }, + "GetManagedPrefixListAssociationsResult":{ + "type":"structure", + "members":{ + "PrefixListAssociations":{ + "shape":"PrefixListAssociationSet", + "documentation":"

Information about the associations.

", + "locationName":"prefixListAssociationSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, + "GetManagedPrefixListEntriesRequest":{ + "type":"structure", + "required":["PrefixListId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

" + }, + "TargetVersion":{ + "shape":"Long", + "documentation":"

The version of the prefix list for which to return the entries. The default is the current version.

" + }, + "MaxResults":{ + "shape":"PrefixListMaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next page of results.

" + } + } + }, + "GetManagedPrefixListEntriesResult":{ + "type":"structure", + "members":{ + "Entries":{ + "shape":"PrefixListEntrySet", + "documentation":"

Information about the prefix list entries.

", + "locationName":"entrySet" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "locationName":"nextToken" + } + } + }, "GetPasswordDataRequest":{ "type":"structure", "required":["InstanceId"], @@ -19773,7 +20373,7 @@ "locationName":"configured" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "HibernationOptionsRequest":{ "type":"structure", @@ -19783,7 +20383,7 @@ "documentation":"

If you set this parameter to true, your instance is enabled for hibernation.

Default: false

" } }, - "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether your instance is configured for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "HistoryRecord":{ "type":"structure", @@ -20531,7 +21131,7 @@ }, "Format":{ "shape":"String", - "documentation":"

The format of the disk image being imported.

Valid values: VHD | VMDK | OVA

" + "documentation":"

The format of the disk image being imported.

Valid values: OVA | VHD | VHDX | VMDK

" }, "SnapshotId":{ "shape":"SnapshotId", @@ -20711,6 +21311,11 @@ "LicenseSpecifications":{ "shape":"ImportImageLicenseSpecificationListRequest", "documentation":"

The ARNs of the license configurations.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the image being imported.

", + "locationName":"TagSpecification" } } }, @@ -20729,7 +21334,7 @@ }, "Encrypted":{ "shape":"Boolean", - "documentation":"

Indicates whether the AMI is encypted.

", + "documentation":"

Indicates whether the AMI is encrypted.

", "locationName":"encrypted" }, "Hypervisor":{ @@ -20743,12 +21348,12 @@ "locationName":"imageId" }, "ImportTaskId":{ - "shape":"String", + "shape":"ImportImageTaskId", "documentation":"

The task ID of the import image task.

", "locationName":"importTaskId" }, "KmsKeyId":{ - "shape":"String", + "shape":"KmsKeyId", "documentation":"

The identifier for the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to create the encrypted AMI.

", "locationName":"kmsKeyId" }, @@ -20786,6 +21391,11 @@ "shape":"ImportImageLicenseSpecificationListResponse", "documentation":"

The ARNs of the license configurations.

", "locationName":"licenseSpecifications" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the image being imported.

", + "locationName":"tagSet" } } }, @@ -21075,6 +21685,11 @@ "shape":"Blob", "documentation":"

The public key. For API calls, the text must be base64-encoded. For command line tools, base64 encoding is performed for you.

", "locationName":"publicKeyMaterial" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the imported key pair.

", + "locationName":"TagSpecification" } } }, @@ -21090,6 +21705,16 @@ "shape":"String", "documentation":"

The key pair name you provided.

", "locationName":"keyName" + }, + "KeyPairId":{ + "shape":"String", + "documentation":"

The ID of the resulting key pair.

", + "locationName":"keyPairId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags applied to the imported key pair.

", + "locationName":"tagSet" } } }, @@ -21127,6 +21752,11 @@ "RoleName":{ "shape":"String", "documentation":"

The name of the role to use when not using the default role, 'vmimport'.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the snapshot being imported.

", + "locationName":"TagSpecification" } } }, @@ -21147,6 +21777,11 @@ "shape":"SnapshotTaskDetail", "documentation":"

Information about the import snapshot task.

", "locationName":"snapshotTaskDetail" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags assigned to the snapshot being imported.

", + "locationName":"tagSet" } } }, @@ -22506,6 +23141,29 @@ }, "documentation":"

Describes the disks that are available for the instance type.

" }, + "InstanceTagKeySet":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, + "InstanceTagNotificationAttribute":{ + "type":"structure", + "members":{ + "InstanceTagKeys":{ + "shape":"InstanceTagKeySet", + "documentation":"

The registered tag keys.

", + "locationName":"instanceTagKeySet" + }, + "IncludeAllTagsOfInstance":{ + "shape":"Boolean", + "documentation":"

Indicates whether all tag keys in the current Region are registered to appear in scheduled event notifications. true indicates that all tag keys in the current Region are registered.

", + "locationName":"includeAllTagsOfInstance" + } + }, + "documentation":"

Describes the registered tag keys for the current Region.

" + }, "InstanceType":{ "type":"string", "enum":[ @@ -22594,6 +23252,15 @@ "r5ad.12xlarge", "r5ad.16xlarge", "r5ad.24xlarge", + "r6g.metal", + "r6g.medium", + "r6g.large", + "r6g.xlarge", + "r6g.2xlarge", + "r6g.4xlarge", + "r6g.8xlarge", + "r6g.12xlarge", + "r6g.16xlarge", "x1.16xlarge", "x1.32xlarge", "x1e.xlarge", @@ -22644,6 +23311,14 @@ "c5.18xlarge", "c5.24xlarge", "c5.metal", + "c5a.large", + "c5a.xlarge", + "c5a.2xlarge", + "c5a.4xlarge", + "c5a.8xlarge", + "c5a.12xlarge", + "c5a.16xlarge", + "c5a.24xlarge", "c5d.large", "c5d.xlarge", "c5d.2xlarge", @@ -22659,6 +23334,15 @@ "c5n.4xlarge", "c5n.9xlarge", "c5n.18xlarge", + "c6g.metal", + "c6g.medium", + "c6g.large", + "c6g.xlarge", + "c6g.2xlarge", + "c6g.4xlarge", + "c6g.8xlarge", + "c6g.12xlarge", + "c6g.16xlarge", "cc1.4xlarge", "cc2.8xlarge", "g2.2xlarge", @@ -22673,6 +23357,7 @@ "g4dn.8xlarge", "g4dn.12xlarge", "g4dn.16xlarge", + "g4dn.metal", "cg1.4xlarge", "p2.xlarge", "p2.8xlarge", @@ -22779,7 +23464,16 @@ "inf1.xlarge", "inf1.2xlarge", "inf1.6xlarge", - "inf1.24xlarge" + "inf1.24xlarge", + "m6g.metal", + "m6g.medium", + "m6g.large", + "m6g.xlarge", + "m6g.2xlarge", + "m6g.4xlarge", + "m6g.8xlarge", + "m6g.12xlarge", + "m6g.16xlarge" ] }, "InstanceTypeHypervisor":{ @@ -22817,6 +23511,11 @@ "documentation":"

Indicates the supported root device types.

", "locationName":"supportedRootDeviceTypes" }, + "SupportedVirtualizationTypes":{ + "shape":"VirtualizationTypeList", + "documentation":"

The supported virtualization types.

", + "locationName":"supportedVirtualizationTypes" + }, "BareMetal":{ "shape":"BareMetalFlag", "documentation":"

Indicates whether the instance is bare metal.

", @@ -23064,7 +23763,7 @@ }, "PrefixListIds":{ "shape":"PrefixListIdList", - "documentation":"

[VPC only] The prefix list IDs for an AWS service. With outbound rules, this is the AWS service to access through a VPC endpoint from instances associated with the security group.

", + "documentation":"

[VPC only] The prefix list IDs.

", "locationName":"prefixListIds" }, "ToPort":{ @@ -23274,6 +23973,11 @@ "shape":"String", "documentation":"

The ID of the key pair.

", "locationName":"keyPairId" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags applied to the key pair.

", + "locationName":"tagSet" } }, "documentation":"

Describes a key pair.

" @@ -24972,6 +25676,69 @@ ] }, "Long":{"type":"long"}, + "ManagedPrefixList":{ + "type":"structure", + "members":{ + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

", + "locationName":"prefixListId" + }, + "AddressFamily":{ + "shape":"String", + "documentation":"

The IP address version.

", + "locationName":"addressFamily" + }, + "State":{ + "shape":"PrefixListState", + "documentation":"

The state of the prefix list.

", + "locationName":"state" + }, + "StateMessage":{ + "shape":"String", + "documentation":"

The state message.

", + "locationName":"stateMessage" + }, + "PrefixListArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) for the prefix list.

", + "locationName":"prefixListArn" + }, + "PrefixListName":{ + "shape":"String", + "documentation":"

The name of the prefix list.

", + "locationName":"prefixListName" + }, + "MaxEntries":{ + "shape":"Integer", + "documentation":"

The maximum number of entries for the prefix list.

", + "locationName":"maxEntries" + }, + "Version":{ + "shape":"Long", + "documentation":"

The version of the prefix list.

", + "locationName":"version" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags for the prefix list.

", + "locationName":"tagSet" + }, + "OwnerId":{ + "shape":"String", + "documentation":"

The ID of the owner of the prefix list.

", + "locationName":"ownerId" + } + }, + "documentation":"

Describes a managed prefix list.

" + }, + "ManagedPrefixListSet":{ + "type":"list", + "member":{ + "shape":"ManagedPrefixList", + "locationName":"item" + } + }, "MarketType":{ "type":"string", "enum":["spot"] @@ -24980,6 +25747,9 @@ "MaxIpv6AddrPerInterface":{"type":"integer"}, "MaxNetworkInterfaces":{"type":"integer"}, "MaxResults":{"type":"integer"}, + "MaximumBandwidthInMbps":{"type":"integer"}, + "MaximumIops":{"type":"integer"}, + "MaximumThroughputInMBps":{"type":"double"}, "MembershipType":{ "type":"string", "enum":[ @@ -25431,7 +26201,7 @@ }, "BlockDeviceMappings":{ "shape":"InstanceBlockDeviceMappingSpecificationList", - "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the Block Device Mapping when Launching an Instance in the Amazon Elastic Compute Cloud User Guide.

", + "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Updating the block device mapping when launching an instance in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"blockDeviceMapping" }, "DisableApiTermination":{ @@ -25471,7 +26241,7 @@ }, "InstanceType":{ "shape":"AttributeValue", - "documentation":"

Changes the instance type to the specified value. For more information, see Instance Types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

", + "documentation":"

Changes the instance type to the specified value. For more information, see Instance types. If the instance type is not valid, the error returned is InvalidInstanceAttributeValue.

", "locationName":"instanceType" }, "Kernel":{ @@ -25726,6 +26496,48 @@ } } }, + "ModifyManagedPrefixListRequest":{ + "type":"structure", + "required":["PrefixListId"], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

" + }, + "CurrentVersion":{ + "shape":"Long", + "documentation":"

The current version of the prefix list.

" + }, + "PrefixListName":{ + "shape":"String", + "documentation":"

A name for the prefix list.

" + }, + "AddEntries":{ + "shape":"AddPrefixListEntries", + "documentation":"

One or more entries to add to the prefix list.

", + "locationName":"AddEntry" + }, + "RemoveEntries":{ + "shape":"RemovePrefixListEntries", + "documentation":"

One or more entries to remove from the prefix list.

", + "locationName":"RemoveEntry" + } + } + }, + "ModifyManagedPrefixListResult":{ + "type":"structure", + "members":{ + "PrefixList":{ + "shape":"ManagedPrefixList", + "documentation":"

Information about the prefix list.

", + "locationName":"prefixList" + } + } + }, "ModifyNetworkInterfaceAttributeRequest":{ "type":"structure", "required":["NetworkInterfaceId"], @@ -25883,12 +26695,20 @@ }, "MapPublicIpOnLaunch":{ "shape":"AttributeBooleanValue", - "documentation":"

Specify true to indicate that ENIs attached to instances created in the specified subnet should be assigned a public IPv4 address.

" + "documentation":"

Specify true to indicate that network interfaces attached to instances created in the specified subnet should be assigned a public IPv4 address.

" }, "SubnetId":{ "shape":"SubnetId", "documentation":"

The ID of the subnet.

", "locationName":"subnetId" + }, + "MapCustomerOwnedIpOnLaunch":{ + "shape":"AttributeBooleanValue", + "documentation":"

Specify true to indicate that network interfaces attached to instances created in the specified subnet should be assigned a customer-owned IPv4 address.

When this value is true, you must specify the customer-owned IP pool using CustomerOwnedIpv4Pool.

" + }, + "CustomerOwnedIpv4Pool":{ + "shape":"CoipPoolId", + "documentation":"

The customer-owned IPv4 address pool associated with the subnet.

You must set this value when you specify true for MapCustomerOwnedIpOnLaunch.

" } } }, @@ -26989,6 +27809,11 @@ "shape":"EnaSupport", "documentation":"

Indicates whether Elastic Network Adapter (ENA) is supported.

", "locationName":"enaSupport" + }, + "EfaSupported":{ + "shape":"EfaSupportedFlag", + "documentation":"

Indicates whether Elastic Fabric Adapter (EFA) is supported.

", + "locationName":"efaSupported" } }, "documentation":"

Describes the networking features of the instance type.

" @@ -27830,12 +28655,12 @@ "members":{ "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone of the instance.

If not specified, an Availability Zone will be automatically chosen for you based on the load balancing criteria for the Region.

This parameter is not supported by .

", + "documentation":"

The Availability Zone of the instance.

If not specified, an Availability Zone will be automatically chosen for you based on the load balancing criteria for the Region.

This parameter is not supported by CreateFleet.

", "locationName":"availabilityZone" }, "Affinity":{ "shape":"String", - "documentation":"

The affinity setting for the instance on the Dedicated Host. This parameter is not supported for the ImportInstance command.

This parameter is not supported by .

", + "documentation":"

The affinity setting for the instance on the Dedicated Host. This parameter is not supported for the ImportInstance command.

This parameter is not supported by CreateFleet.

", "locationName":"affinity" }, "GroupName":{ @@ -27845,27 +28670,27 @@ }, "PartitionNumber":{ "shape":"Integer", - "documentation":"

The number of the partition the instance is in. Valid only if the placement group strategy is set to partition.

This parameter is not supported by .

", + "documentation":"

The number of the partition the instance is in. Valid only if the placement group strategy is set to partition.

This parameter is not supported by CreateFleet.

", "locationName":"partitionNumber" }, "HostId":{ "shape":"String", - "documentation":"

The ID of the Dedicated Host on which the instance resides. This parameter is not supported for the ImportInstance command.

This parameter is not supported by .

", + "documentation":"

The ID of the Dedicated Host on which the instance resides. This parameter is not supported for the ImportInstance command.

This parameter is not supported by CreateFleet.

", "locationName":"hostId" }, "Tenancy":{ "shape":"Tenancy", - "documentation":"

The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.

This parameter is not supported by .

", + "documentation":"

The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.

This parameter is not supported by CreateFleet.

", "locationName":"tenancy" }, "SpreadDomain":{ "shape":"String", - "documentation":"

Reserved for future use.

This parameter is not supported by .

", + "documentation":"

Reserved for future use.

This parameter is not supported by CreateFleet.

", "locationName":"spreadDomain" }, "HostResourceGroupArn":{ "shape":"String", - "documentation":"

The ARN of the host resource group in which to launch the instances. If you specify a host resource group ARN, omit the Tenancy parameter or set it to host.

This parameter is not supported by .

", + "documentation":"

The ARN of the host resource group in which to launch the instances. If you specify a host resource group ARN, omit the Tenancy parameter or set it to host.

This parameter is not supported by CreateFleet.

", "locationName":"hostResourceGroupArn" } }, @@ -28045,6 +28870,52 @@ }, "documentation":"

Describes prefixes for AWS services.

" }, + "PrefixListAssociation":{ + "type":"structure", + "members":{ + "ResourceId":{ + "shape":"String", + "documentation":"

The ID of the resource.

", + "locationName":"resourceId" + }, + "ResourceOwner":{ + "shape":"String", + "documentation":"

The owner of the resource.

", + "locationName":"resourceOwner" + } + }, + "documentation":"

Describes the resource with which a prefix list is associated.

" + }, + "PrefixListAssociationSet":{ + "type":"list", + "member":{ + "shape":"PrefixListAssociation", + "locationName":"item" + } + }, + "PrefixListEntry":{ + "type":"structure", + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"

The CIDR block.

", + "locationName":"cidr" + }, + "Description":{ + "shape":"String", + "documentation":"

The description.

", + "locationName":"description" + } + }, + "documentation":"

Describes a prefix list entry.

" + }, + "PrefixListEntrySet":{ + "type":"list", + "member":{ + "shape":"PrefixListEntry", + "locationName":"item" + } + }, "PrefixListId":{ "type":"structure", "members":{ @@ -28075,6 +28946,11 @@ "locationName":"item" } }, + "PrefixListMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, "PrefixListResourceId":{"type":"string"}, "PrefixListResourceIdStringList":{ "type":"list", @@ -28090,6 +28966,23 @@ "locationName":"item" } }, + "PrefixListState":{ + "type":"string", + "enum":[ + "create-in-progress", + "create-complete", + "create-failed", + "modify-in-progress", + "modify-complete", + "modify-failed", + "restore-in-progress", + "restore-complete", + "restore-failed", + "delete-in-progress", + "delete-complete", + "delete-failed" + ] + }, "PriceSchedule":{ "type":"structure", "members":{ @@ -28370,6 +29263,11 @@ "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PoolTagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the address pool.

", + "locationName":"PoolTagSpecification" } } }, @@ -28449,6 +29347,11 @@ "documentation":"

The total number of available addresses.

", "locationName":"totalAvailableAddressCount" }, + "NetworkBorderGroup":{ + "shape":"String", + "documentation":"

The name of the location from which the address pool is advertised. A network border group is a unique set of Availability Zones or Local Zones from where AWS advertises public IP addresses.

", + "locationName":"networkBorderGroup" + }, "Tags":{ "shape":"TagList", "documentation":"

Any tags for the address pool.

", @@ -28576,6 +29479,11 @@ "OfferingId":{ "shape":"OfferingId", "documentation":"

The ID of the offering.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The tags to apply to the Dedicated Host Reservation during purchase.

", + "locationName":"TagSpecification" } } }, @@ -28901,6 +29809,44 @@ }, "documentation":"

Contains the output of RegisterImage.

" }, + "RegisterInstanceEventNotificationAttributesRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "InstanceTagAttribute":{ + "shape":"RegisterInstanceTagAttributeRequest", + "documentation":"

Information about the tag keys to register.

" + } + } + }, + "RegisterInstanceEventNotificationAttributesResult":{ + "type":"structure", + "members":{ + "InstanceTagAttribute":{ + "shape":"InstanceTagNotificationAttribute", + "documentation":"

The resulting set of tag keys.

", + "locationName":"instanceTagAttribute" + } + } + }, + "RegisterInstanceTagAttributeRequest":{ + "type":"structure", + "members":{ + "IncludeAllTagsOfInstance":{ + "shape":"Boolean", + "documentation":"

Indicates whether to register all tag keys in the current Region. Specify true to register all tag keys.

" + }, + "InstanceTagKeys":{ + "shape":"InstanceTagKeySet", + "documentation":"

The tag keys to register.

", + "locationName":"InstanceTagKey" + } + }, + "documentation":"

Information about the tag keys to register for the current Region. You can either specify individual tag keys or register all tag keys in the current Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys in the request.

" + }, "RegisterTransitGatewayMulticastGroupMembersRequest":{ "type":"structure", "members":{ @@ -29117,6 +30063,23 @@ } } }, + "RemovePrefixListEntries":{ + "type":"list", + "member":{"shape":"RemovePrefixListEntry"}, + "max":1000, + "min":0 + }, + "RemovePrefixListEntry":{ + "type":"structure", + "required":["Cidr"], + "members":{ + "Cidr":{ + "shape":"String", + "documentation":"

The CIDR block.

" + } + }, + "documentation":"

An entry for a prefix list.

" + }, "ReplaceIamInstanceProfileAssociationRequest":{ "type":"structure", "required":[ @@ -29254,6 +30217,10 @@ "documentation":"

The IPv6 CIDR address block used for the destination match. The value that you provide must match the CIDR of an existing route in the table.

", "locationName":"destinationIpv6CidrBlock" }, + "DestinationPrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list for the route.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -29675,6 +30642,11 @@ "documentation":"

The end date of the request. If this is a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. If the request is persistent, it remains active until it is canceled or this date is reached. The default end date is 7 days from the current date.

", "locationName":"validUntil" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

The key-value pair for tagging the Spot Instance request on creation. The value for ResourceType must be spot-instances-request, otherwise the Spot Instance request fails. To tag the Spot Instance request after it has been created, see CreateTags.

", + "locationName":"TagSpecification" + }, "InstanceInterruptionBehavior":{ "shape":"InstanceInterruptionBehavior", "documentation":"

The behavior when a Spot Instance is interrupted. The default is terminate.

" @@ -30477,6 +31449,11 @@ } } }, + "ResourceArn":{ + "type":"string", + "max":1283, + "min":1 + }, "ResourceIdList":{ "type":"list", "member":{"shape":"TaggableResourceId"} @@ -30496,14 +31473,20 @@ "dedicated-host", "dhcp-options", "elastic-ip", + "elastic-gpu", + "export-image-task", + "export-instance-task", "fleet", "fpga-image", "host-reservation", "image", + "import-image-task", + "import-snapshot-task", "instance", "internet-gateway", "key-pair", "launch-template", + "local-gateway-route-table-vpc-association", "natgateway", "network-acl", "network-interface", @@ -30731,6 +31714,42 @@ } } }, + "RestoreManagedPrefixListVersionRequest":{ + "type":"structure", + "required":[ + "PrefixListId", + "PreviousVersion", + "CurrentVersion" + ], + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + }, + "PrefixListId":{ + "shape":"PrefixListResourceId", + "documentation":"

The ID of the prefix list.

" + }, + "PreviousVersion":{ + "shape":"Long", + "documentation":"

The version to restore.

" + }, + "CurrentVersion":{ + "shape":"Long", + "documentation":"

The current version number for the prefix list.

" + } + } + }, + "RestoreManagedPrefixListVersionResult":{ + "type":"structure", + "members":{ + "PrefixList":{ + "shape":"ManagedPrefixList", + "documentation":"

Information about the prefix list.

", + "locationName":"prefixList" + } + } + }, "RevokeClientVpnIngressRequest":{ "type":"structure", "required":[ @@ -31143,7 +32162,7 @@ }, "InstanceType":{ "shape":"InstanceType", - "documentation":"

The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide.

Default: m1.small

" + "documentation":"

The instance type. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide.

Default: m1.small

" }, "Ipv6AddressCount":{ "shape":"Integer", @@ -31198,7 +32217,7 @@ }, "UserData":{ "shape":"String", - "documentation":"

The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

" + "documentation":"

The user data to make available to the instance. For more information, see Running commands on your Linux instance at launch (Linux) and Adding User Data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

" }, "AdditionalInfo":{ "shape":"String", @@ -31208,6 +32227,7 @@ "ClientToken":{ "shape":"String", "documentation":"

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

For more information, see Ensuring Idempotency.

Constraints: Maximum 64 ASCII characters

", + "idempotencyToken":true, "locationName":"clientToken" }, "DisableApiTermination":{ @@ -31251,7 +32271,7 @@ }, "ElasticInferenceAccelerators":{ "shape":"ElasticInferenceAccelerators", - "documentation":"

An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.

", + "documentation":"

An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.

You cannot specify accelerators from different generations in the same request.

", "locationName":"ElasticInferenceAccelerator" }, "TagSpecifications":{ @@ -31269,11 +32289,11 @@ }, "CreditSpecification":{ "shape":"CreditSpecificationRequest", - "documentation":"

The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard (T2 instances) or unlimited (T3/T3a instances)

" + "documentation":"

The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable performance instances in the Amazon Elastic Compute Cloud User Guide.

Default: standard (T2 instances) or unlimited (T3/T3a instances)

" }, "CpuOptions":{ "shape":"CpuOptionsRequest", - "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

The CPU options for the instance. For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User Guide.

" }, "CapacityReservationSpecification":{ "shape":"CapacityReservationSpecification", @@ -31281,7 +32301,7 @@ }, "HibernationOptions":{ "shape":"HibernationOptionsRequest", - "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

" }, "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", @@ -31290,7 +32310,7 @@ }, "MetadataOptions":{ "shape":"InstanceMetadataOptionsRequest", - "documentation":"

The metadata options for the instance. For more information, see Instance Metadata and User Data.

" + "documentation":"

The metadata options for the instance. For more information, see Instance metadata and user data.

" } } }, @@ -32406,7 +33426,7 @@ }, "OwnerAlias":{ "shape":"String", - "documentation":"

Value from an Amazon-maintained list (amazon | self | all | aws-marketplace | microsoft) of snapshot owners. Not to be confused with the user-configured AWS account alias, which is set from the IAM console.

", + "documentation":"

The AWS owner alias, as maintained by Amazon. The possible values are: amazon | self | all | aws-marketplace | microsoft. This AWS owner alias is not to be confused with the user-configured AWS account alias, which is set from the IAM console.

", "locationName":"ownerAlias" }, "Tags":{ @@ -32474,7 +33494,7 @@ }, "UserBucket":{ "shape":"UserBucketDetails", - "documentation":"

The S3 bucket for the disk image.

", + "documentation":"

The Amazon S3 bucket for the disk image.

", "locationName":"userBucket" } }, @@ -32504,7 +33524,7 @@ }, "UserBucket":{ "shape":"UserBucket", - "documentation":"

The S3 bucket for the disk image.

" + "documentation":"

The Amazon S3 bucket for the disk image.

" } }, "documentation":"

The disk container object for the import snapshot request.

" @@ -32650,7 +33670,7 @@ }, "UserBucket":{ "shape":"UserBucketDetails", - "documentation":"

The S3 bucket for the disk image.

", + "documentation":"

The Amazon S3 bucket for the disk image.

", "locationName":"userBucket" } }, @@ -32877,7 +33897,7 @@ }, "IamFleetRole":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants the Spot Fleet the permission to request, launch, terminate, and tag instances on your behalf. For more information, see Spot Fleet Prerequisites in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate Spot Instances on your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration.

", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that grants the Spot Fleet the permission to request, launch, terminate, and tag instances on your behalf. For more information, see Spot Fleet prerequisites in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate Spot Instances on your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration.

", "locationName":"iamFleetRole" }, "LaunchSpecifications":{ @@ -33074,7 +34094,7 @@ }, "State":{ "shape":"SpotInstanceState", - "documentation":"

The state of the Spot Instance request. Spot status information helps track your Spot Instance requests. For more information, see Spot Status in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The state of the Spot Instance request. Spot status information helps track your Spot Instance requests. For more information, see Spot status in the Amazon EC2 User Guide for Linux Instances.

", "locationName":"state" }, "Status":{ @@ -33156,7 +34176,7 @@ "members":{ "Code":{ "shape":"String", - "documentation":"

The status code. For a list of status codes, see Spot Status Codes in the Amazon EC2 User Guide for Linux Instances.

", + "documentation":"

The status code. For a list of status codes, see Spot status codes in the Amazon EC2 User Guide for Linux Instances.

", "locationName":"code" }, "Message":{ @@ -33188,7 +34208,7 @@ }, "SpotInstanceType":{ "shape":"SpotInstanceType", - "documentation":"

The Spot Instance request type. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop.

" + "documentation":"

The Spot Instance request type. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop.

" }, "BlockDurationMinutes":{ "shape":"Integer", @@ -33359,7 +34379,7 @@ }, "PrefixListIds":{ "shape":"PrefixListIdSet", - "documentation":"

The prefix list IDs for an AWS service. Not applicable for stale security group rules.

", + "documentation":"

The prefix list IDs. Not applicable for stale security group rules.

", "locationName":"prefixListIds" }, "ToPort":{ @@ -33541,7 +34561,7 @@ }, "Hibernate":{ "shape":"Boolean", - "documentation":"

Hibernates the instance if the instance was enabled for hibernation at launch. If the instance cannot hibernate successfully, a normal shutdown occurs. For more information, see Hibernate Your Instance in the Amazon Elastic Compute Cloud User Guide.

Default: false

" + "documentation":"

Hibernates the instance if the instance was enabled for hibernation at launch. If the instance cannot hibernate successfully, a normal shutdown occurs. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide.

Default: false

" }, "DryRun":{ "shape":"Boolean", @@ -33623,6 +34643,16 @@ "documentation":"

Indicates whether instances launched in this subnet receive a public IPv4 address.

", "locationName":"mapPublicIpOnLaunch" }, + "MapCustomerOwnedIpOnLaunch":{ + "shape":"Boolean", + "documentation":"

Indicates whether a network interface created in this subnet (including a network interface created by RunInstances) receives a customer-owned IPv4 address.

", + "locationName":"mapCustomerOwnedIpOnLaunch" + }, + "CustomerOwnedIpv4Pool":{ + "shape":"CoipPoolId", + "documentation":"

The customer-owned IPv4 address pool associated with the subnet.

", + "locationName":"customerOwnedIpv4Pool" + }, "State":{ "shape":"SubnetState", "documentation":"

The current state of the subnet.

", @@ -33879,7 +34909,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host | fleet | fpga-image | instance | key-pair | launch-template | | natgateway | spot-fleet-request | placement-group | snapshot | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | vpc-endpoint (for interface VPC endpoints)| vpc-endpoint-service (for gateway VPC endpoints) | volume | vpc-flow-log.

To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The type of resource to tag. Currently, the resource types that support tagging on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host | dhcp-options | export-image-task | export-instance-task | fleet | fpga-image | host-reservation | import-image-task | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template | placement-group | prefix-list | natgateway | network-acl | security-group | spot-fleet-request | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment | transit-gateway-route-table | volume | vpc | vpc-endpoint (for interface and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log.

To tag a resource after it has been created, see CreateTags.

", "locationName":"resourceType" }, "Tags":{ @@ -33922,7 +34952,7 @@ "locationName":"defaultTargetCapacityType" } }, - "documentation":"

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice, or both to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and

" + "documentation":"

The number of units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice, or both to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptions and SpotOptions

" }, "TargetCapacitySpecificationRequest":{ "type":"structure", @@ -33945,7 +34975,7 @@ "documentation":"

The default TotalTargetCapacity, which is either Spot or On-Demand.

" } }, - "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in and .

" + "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.

" }, "TargetConfiguration":{ "type":"structure", @@ -35908,21 +36938,21 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

The name of the S3 bucket where the disk image is located.

" + "documentation":"

The name of the Amazon S3 bucket where the disk image is located.

" }, "S3Key":{ "shape":"String", "documentation":"

The file name of the disk image.

" } }, - "documentation":"

Describes the S3 bucket for the disk image.

" + "documentation":"

Describes the Amazon S3 bucket for the disk image.

" }, "UserBucketDetails":{ "type":"structure", "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

The S3 bucket from which the disk image was created.

", + "documentation":"

The Amazon S3 bucket from which the disk image was created.

", "locationName":"s3Bucket" }, "S3Key":{ @@ -35931,7 +36961,7 @@ "locationName":"s3Key" } }, - "documentation":"

Describes the S3 bucket for the disk image.

" + "documentation":"

Describes the Amazon S3 bucket for the disk image.

" }, "UserData":{ "type":"structure", @@ -36046,6 +37076,33 @@ }, "documentation":"

Describes the vCPU configurations for the instance type.

" }, + "ValidationError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "documentation":"

The error code that indicates why the parameter or parameter combination is not valid. For more information about error codes, see Error Codes.

", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "documentation":"

The error message that describes why the parameter or parameter combination is not valid. For more information about error messages, see Error Codes.

", + "locationName":"message" + } + }, + "documentation":"

The error code and error message that is returned for a parameter or parameter combination that is not valid when a new launch template or new version of a launch template is created.

" + }, + "ValidationWarning":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"ErrorSet", + "documentation":"

The error codes and error messages.

", + "locationName":"errorSet" + } + }, + "documentation":"

The error codes and error messages that are returned for the parameters or parameter combinations that are not valid when a new launch template or new version of a launch template is created.

" + }, "ValueStringList":{ "type":"list", "member":{ @@ -36114,6 +37171,13 @@ "paravirtual" ] }, + "VirtualizationTypeList":{ + "type":"list", + "member":{ + "shape":"VirtualizationType", + "locationName":"item" + } + }, "Volume":{ "type":"structure", "members":{ @@ -36317,7 +37381,7 @@ }, "OriginalSize":{ "shape":"Integer", - "documentation":"

The original size of the volume.

", + "documentation":"

The original size of the volume, in GiB.

", "locationName":"originalSize" }, "OriginalIops":{ diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index 3c1c07370404..0d976f39dbf1 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 35c6b239badf..7f005f0d3821 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecr/src/main/resources/codegen-resources/service-2.json b/services/ecr/src/main/resources/codegen-resources/service-2.json index 182d8f8641e9..4538f14a546b 100755 --- a/services/ecr/src/main/resources/codegen-resources/service-2.json +++ b/services/ecr/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServerException"} ], - "documentation":"

Checks the availability of one or more image layers in a repository.

When an image is pushed to a repository, each image layer is checked to verify if it has been uploaded before. If it is, then the image layer is skipped.

When an image is pulled from a repository, each image layer is checked once to verify it is available to be pulled.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Checks the availability of one or more image layers in a repository.

When an image is pushed to a repository, each image layer is checked to verify if it has been uploaded before. If it has been uploaded, then the image layer is skipped.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "BatchDeleteImage":{ "name":"BatchDeleteImage", @@ -77,7 +77,7 @@ {"shape":"LayerAlreadyExistsException"}, {"shape":"EmptyUploadException"} ], - "documentation":"

Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

When an image is pushed, the CompleteLayerUpload API is called once per each new image layer to verify that the upload has completed.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID. You can optionally provide a sha256 digest of the image layer for data validation purposes.

When an image is pushed, the CompleteLayerUpload API is called once per each new image layer to verify that the upload has completed.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "CreateRepository":{ "name":"CreateRepository", @@ -222,7 +222,7 @@ {"shape":"LayerInaccessibleException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can only get URLs for image layers that are referenced in an image.

When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer that is not already cached.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "GetLifecyclePolicy":{ "name":"GetLifecyclePolicy", @@ -285,7 +285,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Notifies Amazon ECR that you intend to upload an image layer.

When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether an image layer has been uploaded before is determined by the BatchCheckLayerAvailability API action.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Notifies Amazon ECR that you intend to upload an image layer.

When an image is pushed, the InitiateLayerUpload API is called once per image layer that has not already been uploaded. Whether or not an image layer has been uploaded is determined by the BatchCheckLayerAvailability API action.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "ListImages":{ "name":"ListImages", @@ -331,10 +331,12 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"ImageAlreadyExistsException"}, {"shape":"LayersNotFoundException"}, + {"shape":"ReferencedImagesNotFoundException"}, {"shape":"LimitExceededException"}, - {"shape":"ImageTagAlreadyExistsException"} + {"shape":"ImageTagAlreadyExistsException"}, + {"shape":"ImageDigestDoesNotMatchException"} ], - "documentation":"

Creates or updates the image manifest and tags associated with an image.

When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and tags associated with the image.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Creates or updates the image manifest and tags associated with an image.

When an image is pushed and all new image layers have been uploaded, the PutImage API is called once to create or update the image manifest and the tags associated with the image.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" }, "PutImageScanningConfiguration":{ "name":"PutImageScanningConfiguration", @@ -394,7 +396,7 @@ {"shape":"InvalidParameterException"}, {"shape":"RepositoryNotFoundException"} ], - "documentation":"

Applies a repository policy to the specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

Applies a repository policy to the specified repository to control access permissions. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" }, "StartImageScan":{ "name":"StartImageScan", @@ -407,6 +409,8 @@ "errors":[ {"shape":"ServerException"}, {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedImageTypeException"}, + {"shape":"LimitExceededException"}, {"shape":"RepositoryNotFoundException"}, {"shape":"ImageNotFoundException"} ], @@ -479,7 +483,7 @@ {"shape":"UploadNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Uploads an image layer part to Amazon ECR.

When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part.

This operation is used by the Amazon ECR proxy, and it is not intended for general use by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" + "documentation":"

Uploads an image layer part to Amazon ECR.

When an image is pushed, each new image layer is uploaded in parts. The maximum size of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per each new image layer part.

This operation is used by the Amazon ECR proxy and is not generally used by customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

" } }, "shapes":{ @@ -1208,6 +1212,10 @@ "imageManifest":{ "shape":"ImageManifest", "documentation":"

The image manifest associated with the image.

" + }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The media type associated with the image manifest.

" } }, "documentation":"

An object representing an Amazon ECR image.

" @@ -1252,7 +1260,7 @@ }, "imageSizeInBytes":{ "shape":"ImageSizeInBytes", - "documentation":"

The size, in bytes, of the image in the repository.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" + "documentation":"

The size, in bytes, of the image in the repository.

If the image is a manifest list, this will be the max size of all manifests in the list.

Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

" }, "imagePushedAt":{ "shape":"PushTimestamp", @@ -1274,6 +1282,14 @@ "member":{"shape":"ImageDetail"} }, "ImageDigest":{"type":"string"}, + "ImageDigestDoesNotMatchException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The specified image digest does not match the digest that Amazon ECR calculated for the image.

", + "exception":true + }, "ImageFailure":{ "type":"structure", "members":{ @@ -1299,7 +1315,8 @@ "InvalidImageTag", "ImageTagDoesNotMatchDigest", "ImageNotFound", - "MissingDigestAndTag" + "MissingDigestAndTag", + "ImageReferencedByManifestList" ] }, "ImageFailureList":{ @@ -1785,7 +1802,7 @@ "documentation":"

The error message associated with the exception.

" } }, - "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Default Service Limits in the Amazon Elastic Container Registry User Guide.

", + "documentation":"

The operation did not succeed because it would have exceeded a service limit for your account. For more information, see Amazon ECR Service Quotas in the Amazon Elastic Container Registry User Guide.

", "exception":true }, "ListImagesFilter":{ @@ -1894,9 +1911,17 @@ "shape":"ImageManifest", "documentation":"

The image manifest corresponding to the image to be uploaded.

" }, + "imageManifestMediaType":{ + "shape":"MediaType", + "documentation":"

The media type of the image manifest. If you push an image manifest that does not contain the mediaType field, you must specify the imageManifestMediaType in the request.

" + }, "imageTag":{ "shape":"ImageTag", "documentation":"

The tag to associate with the image. This parameter is required for images that use the Docker Image Manifest V2 Schema 2 or OCI formats.

" + }, + "imageDigest":{ + "shape":"ImageDigest", + "documentation":"

The image digest of the image manifest corresponding to the image.

" } } }, @@ -2023,6 +2048,14 @@ } } }, + "ReferencedImagesNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The manifest list is referencing an image that does not exist.

", + "exception":true + }, "RegistryId":{ "type":"string", "pattern":"[0-9]{12}" @@ -2171,7 +2204,7 @@ }, "policyText":{ "shape":"RepositoryPolicyText", - "documentation":"

The JSON repository policy text to apply to the repository. For more information, see Amazon ECR Repository Policy Examples in the Amazon Elastic Container Registry User Guide.

" + "documentation":"

The JSON repository policy text to apply to the repository. For more information, see Amazon ECR Repository Policies in the Amazon Elastic Container Registry User Guide.

" }, "force":{ "shape":"ForceFlag", @@ -2337,6 +2370,14 @@ "documentation":"

The list of tags on the repository is over the limit. The maximum number of tags that can be applied to a repository is 50.

", "exception":true }, + "UnsupportedImageTypeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "documentation":"

The image is of a type that cannot be scanned.

", + "exception":true + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -2387,11 +2428,11 @@ }, "partFirstByte":{ "shape":"PartSize", - "documentation":"

The integer value of the first byte of the layer part.

" + "documentation":"

The position of the first byte of the layer part within the overall image layer.

" }, "partLastByte":{ "shape":"PartSize", - "documentation":"

The integer value of the last byte of the layer part.

" + "documentation":"

The position of the last byte of the layer part within the overall image layer.

" }, "layerPartBlob":{ "shape":"LayerPartBlob", diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index ebb9c86bf43b..3f8cda0c4cc8 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index b4b62e39dd42..e2af225f7aaa 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -25,7 +25,8 @@ {"shape":"ServerException"}, {"shape":"ClientException"}, {"shape":"InvalidParameterException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"UpdateInProgressException"} ], "documentation":"

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.

Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on AWS Fargate use the FARGATE and FARGATE_SPOT capacity providers which are already created and available to all accounts in Regions supported by AWS Fargate.

" }, @@ -62,7 +63,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy) with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

" + "documentation":"

Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and the container instance that they're hosted on is reported as healthy by the load balancer.

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment is triggered by changing properties, such as the task definition or the desired count of a service, with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service is using the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment, as a percentage of the desired number of tasks (rounded up to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they're in the RUNNING state and they're reported as healthy by the load balancer. The default value for minimum healthy percent is 100%.

If a service is using the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desired number of tasks (rounded down to the nearest integer), and while any container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. This parameter enables you to define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service is using either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used, although they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy with the placementStrategy parameter):

    • Sort the valid container instances, giving priority to instances that have the fewest number of running tasks for this service in their respective Availability Zone. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

" }, "CreateTaskSet":{ "name":"CreateTaskSet", @@ -116,6 +117,21 @@ ], "documentation":"

Deletes one or more custom attributes from an Amazon ECS resource.

" }, + "DeleteCapacityProvider":{ + "name":"DeleteCapacityProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCapacityProviderRequest"}, + "output":{"shape":"DeleteCapacityProviderResponse"}, + "errors":[ + {"shape":"ServerException"}, + {"shape":"ClientException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Deletes the specified capacity provider.

The FARGATE and FARGATE_SPOT capacity providers are reserved and cannot be deleted. You can disassociate them from a cluster using either the PutClusterCapacityProviders API or by deleting the cluster.

Prior to a capacity provider being deleted, the capacity provider must be removed from the capacity provider strategy of all services. The UpdateService API can be used to remove a capacity provider from a service's capacity provider strategy. When updating a service, the forceNewDeployment option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity provider are transitioned to use the capacity from the remaining capacity providers. Only capacity providers that are not associated with a cluster can be deleted. To remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.

" + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -769,7 +785,7 @@ {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Modifies the parameters of a service.

For services using the rolling update (ECS) deployment controller, the desired count, deployment configuration, network configuration, or task definition used can be updated.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, and health check grace period can be updated using this API. If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment in the AWS CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count and health check grace period using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, you should create a new task set. For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you do not need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy):

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

" + "documentation":"

Updating the task placement strategies and constraints on an Amazon ECS service remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview.

Modifies the parameters of a service.

For services using the rolling update (ECS) deployment controller, the desired count, deployment configuration, network configuration, task placement constraints and strategies, or task definition used can be updated.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, task placement constraints and strategies, and health check grace period can be updated using this API. If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created. For more information, see CreateDeployment in the AWS CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, and health check grace period using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, you should create a new task set. For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you do not need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment, which enables you to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout, after which SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic:

  • Determine which of the container instances in your cluster can support your service's task definition (for example, they have the required CPU, memory, ports, and container instance attributes).

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner (although you can choose a different placement strategy):

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

" }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -993,12 +1009,20 @@ }, "status":{ "shape":"CapacityProviderStatus", - "documentation":"

The current status of the capacity provider. Only capacity providers in an ACTIVE state can be used in a cluster.

" + "documentation":"

The current status of the capacity provider. Only capacity providers in an ACTIVE state can be used in a cluster. When a capacity provider is successfully deleted, it will have an INACTIVE status.

" }, "autoScalingGroupProvider":{ "shape":"AutoScalingGroupProvider", "documentation":"

The Auto Scaling group settings for the capacity provider.

" }, + "updateStatus":{ + "shape":"CapacityProviderUpdateStatus", + "documentation":"

The update status of the capacity provider. The following are the possible states that will be returned.

DELETE_IN_PROGRESS

The capacity provider is in the process of being deleted.

DELETE_COMPLETE

The capacity provider has been successfully deleted and will have an INACTIVE status.

DELETE_FAILED

The capacity provider was unable to be deleted. The update status reason will provide further details about why the delete failed.

" + }, + "updateStatusReason":{ + "shape":"String", + "documentation":"

The update status reason. This provides further details about the update status for the capacity provider.

" + }, "tags":{ "shape":"Tags", "documentation":"

The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8

  • Maximum value length - 256 Unicode characters in UTF-8

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case-sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

" @@ -1016,7 +1040,10 @@ }, "CapacityProviderStatus":{ "type":"string", - "enum":["ACTIVE"] + "enum":[ + "ACTIVE", + "INACTIVE" + ] }, "CapacityProviderStrategy":{ "type":"list", @@ -1028,7 +1055,7 @@ "members":{ "capacityProvider":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the capacity provider.

" + "documentation":"

The short name of the capacity provider.

" }, "weight":{ "shape":"CapacityProviderStrategyItemWeight", @@ -1051,6 +1078,14 @@ "max":1000, "min":0 }, + "CapacityProviderUpdateStatus":{ + "type":"string", + "enum":[ + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED" + ] + }, "CapacityProviders":{ "type":"list", "member":{"shape":"CapacityProvider"} @@ -1340,6 +1375,10 @@ "shape":"EnvironmentVariables", "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

" }, + "environmentFiles":{ + "shape":"EnvironmentFiles", + "documentation":"

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

" + }, "mountPoints":{ "shape":"MountPointList", "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount points cannot be across drives.

" @@ -1422,7 +1461,7 @@ }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", @@ -1430,7 +1469,7 @@ }, "healthCheck":{ "shape":"HealthCheck", - "documentation":"

The health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "documentation":"

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" }, "systemControls":{ "shape":"SystemControls", @@ -1584,6 +1623,10 @@ "shape":"EnvironmentVariables", "documentation":"

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name.

" }, + "environmentFiles":{ + "shape":"EnvironmentFiles", + "documentation":"

A list of files containing the environment variables to pass to a container, instead of the value from the container definition.

" + }, "cpu":{ "shape":"BoxedInteger", "documentation":"

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

" @@ -1696,7 +1739,7 @@ }, "capacityProviders":{ "shape":"StringList", - "documentation":"

The short name or full Amazon Resource Name (ARN) of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" + "documentation":"

The short name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "defaultCapacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -1783,7 +1826,7 @@ }, "schedulingStrategy":{ "shape":"SchedulingStrategy", - "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service is using the CODE_DEPLOY or EXTERNAL deployment controller types.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.

    Tasks using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment controller types don't support the DAEMON scheduling strategy.

" + "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service is using the CODE_DEPLOY or EXTERNAL deployment controller types.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.

    Tasks using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment controller types don't support the DAEMON scheduling strategy.

" }, "deploymentController":{ "shape":"DeploymentController", @@ -1920,6 +1963,22 @@ } } }, + "DeleteCapacityProviderRequest":{ + "type":"structure", + "required":["capacityProvider"], + "members":{ + "capacityProvider":{ + "shape":"String", + "documentation":"

The short name or full Amazon Resource Name (ARN) of the capacity provider to delete.

" + } + } + }, + "DeleteCapacityProviderResponse":{ + "type":"structure", + "members":{ + "capacityProvider":{"shape":"CapacityProvider"} + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["cluster"], @@ -2458,6 +2517,34 @@ "documentation":"

This parameter is specified when you are using Docker volumes. Docker volumes are only supported when you are using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" }, "Double":{"type":"double"}, + "EFSAuthorizationConfig":{ + "type":"structure", + "members":{ + "accessPointId":{ + "shape":"String", + "documentation":"

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration will be relative to the directory set for the access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

" + }, + "iam":{ + "shape":"EFSAuthorizationConfigIAM", + "documentation":"

Whether or not to use the Amazon ECS task IAM role defined in a task definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS Access Points in the Amazon Elastic Container Service Developer Guide.

" + } + }, + "documentation":"

The authorization configuration details for the Amazon EFS file system.

" + }, + "EFSAuthorizationConfigIAM":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "EFSTransitEncryption":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "EFSVolumeConfiguration":{ "type":"structure", "required":["fileSystemId"], @@ -2468,10 +2555,48 @@ }, "rootDirectory":{ "shape":"String", - "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host.

" + "documentation":"

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

" + }, + "transitEncryption":{ + "shape":"EFSTransitEncryption", + "documentation":"

Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of DISABLED is used. For more information, see Encrypting Data in Transit in the Amazon Elastic File System User Guide.

" + }, + "transitEncryptionPort":{ + "shape":"BoxedInteger", + "documentation":"

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see EFS Mount Helper in the Amazon Elastic File System User Guide.

" + }, + "authorizationConfig":{ + "shape":"EFSAuthorizationConfig", + "documentation":"

The authorization configuration details for the Amazon EFS file system.

" + } + }, + "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for task storage. For more information, see Amazon EFS Volumes in the Amazon Elastic Container Service Developer Guide.

" + }, + "EnvironmentFile":{ + "type":"structure", + "required":[ + "value", + "type" + ], + "members":{ + "value":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.

" + }, + "type":{ + "shape":"EnvironmentFileType", + "documentation":"

The file type to use. The only supported value is s3.

" } }, - "documentation":"

This parameter is specified when you are using an Amazon Elastic File System (Amazon EFS) file storage. Amazon EFS file systems are only supported when you are using the EC2 launch type.

EFSVolumeConfiguration remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview of EFSVolumeConfiguration.

" + "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

" + }, + "EnvironmentFileType":{ + "type":"string", + "enum":["s3"] + }, + "EnvironmentFiles":{ + "type":"list", + "member":{"shape":"EnvironmentFile"} }, "EnvironmentVariables":{ "type":"list", @@ -2555,7 +2680,7 @@ "documentation":"

The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

" } }, - "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if you are using platform version 1.1.0 or greater. For more information, see AWS Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.

" + "documentation":"

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The following describes the possible healthStatus values for a container:

  • HEALTHY-The container health check has passed successfully.

  • UNHEALTHY-The container health check has failed.

  • UNKNOWN-The container health check is being evaluated or there is no container health check defined.

The following describes the possible healthStatus values for a task. The container health check status of nonessential containers does not have an effect on the health status of a task.

  • HEALTHY-All essential containers within the task have passed their health checks.

  • UNHEALTHY-One or more essential containers have failed their health check.

  • UNKNOWN-The essential containers within the task are still having their health checks evaluated or there are no container health checks defined.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

  • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS Container Agent.

  • Container health checks are supported for Fargate tasks if you are using platform version 1.1.0 or greater. For more information, see AWS Fargate Platform Versions.

  • Container health checks are not supported for tasks that are part of a service that is configured to use a Classic Load Balancer.

" }, "HealthStatus":{ "type":"string", @@ -2658,7 +2783,7 @@ "members":{ "add":{ "shape":"StringList", - "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

If you are using tasks that use the Fargate launch type, the add parameter is not supported.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" + "documentation":"

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

The SYS_PTRACE capability is supported for tasks that use the Fargate launch type if they are also using platform version 1.4.0. The other capabilities are not supported for any platform versions.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

" }, "drop":{ "shape":"StringList", @@ -2700,7 +2825,7 @@ "members":{ "capabilities":{ "shape":"KernelCapabilities", - "documentation":"

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.

If you are using tasks that use the Fargate launch type, capabilities is supported but the add parameter is not supported.

" + "documentation":"

The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.

For tasks that use the Fargate launch type, capabilities is supported for all platform versions but the add parameter is only supported if using platform version 1.4.0 or later.

" }, "devices":{ "shape":"DevicesList", @@ -2734,7 +2859,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The resource name you want to list the account settings for.

" + "documentation":"

The name of the account setting you want to list the settings for.

" }, "value":{ "shape":"String", @@ -3599,7 +3724,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", @@ -3996,7 +4121,7 @@ }, "schedulingStrategy":{ "shape":"SchedulingStrategy", - "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each container instance in your cluster. When you are using this strategy, do not specify a desired number of tasks or any task placement strategies.

    Fargate tasks do not support the DAEMON scheduling strategy.

" + "documentation":"

The scheduling strategy to use for the service. For more information, see Services.

There are two service scheduler strategies available:

  • REPLICA-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions.

  • DAEMON-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that do not meet the placement constraints.

    Fargate tasks do not support the DAEMON scheduling strategy.

" }, "deploymentController":{ "shape":"DeploymentController", @@ -4609,11 +4734,11 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

" }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

" }, "networkMode":{ "shape":"NetworkMode", @@ -4744,7 +4869,7 @@ }, "executionRoleArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

" + "documentation":"

The Amazon Resource Name (ARN) of the task execution IAM role override for the task.

" }, "memory":{ "shape":"String", @@ -5133,7 +5258,7 @@ }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

The capacity provider strategy to update the service to use.

If the service is using the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers. However, when a service is using a non-default capacity provider strategy, the service cannot be updated to use the cluster's default capacity provider strategy.

" + "documentation":"

The capacity provider strategy to update the service to use.

If the service is using the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers as opposed to the default capacity provider strategy. However, when a service is using a capacity provider strategy that is not the default capacity provider strategy, the service cannot be updated to use the cluster's default capacity provider strategy.

A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

" }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -5228,7 +5353,7 @@ }, "host":{ "shape":"HostVolumeProperties", - "documentation":"

This parameter is specified when you are using bind mount host volumes. Bind mount host volumes are supported when you are using either the EC2 or Fargate launch types. The contents of the host parameter determine whether your bind mount host volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" + "documentation":"

This parameter is specified when you are using bind mount host volumes. The contents of the host parameter determine whether your bind mount host volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop running.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives. For example, you can mount C:\\my\\path:C:\\my\\path and D:\\:D:\\, but not D:\\my\\path:C:\\my\\path or D:\\:C:\\my\\path.

" }, "dockerVolumeConfiguration":{ "shape":"DockerVolumeConfiguration", @@ -5236,10 +5361,10 @@ }, "efsVolumeConfiguration":{ "shape":"EFSVolumeConfiguration", - "documentation":"

This parameter is specified when you are using an Amazon Elastic File System (Amazon EFS) file storage. Amazon EFS file systems are only supported when you are using the EC2 launch type.

EFSVolumeConfiguration remains in preview and is a Beta Service as defined by and subject to the Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms (\"Beta Terms\"). These Beta Terms apply to your participation in this preview of EFSVolumeConfiguration.

" + "documentation":"

This parameter is specified when you are using an Amazon Elastic File System file system for task storage.

" } }, - "documentation":"

A data volume used in a task definition. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" + "documentation":"

A data volume used in a task definition. For tasks that use Amazon Elastic File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" }, "VolumeFrom":{ "type":"structure", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index 6f6a2b9b2fbe..67d2e2d7ba31 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/service-2.json b/services/efs/src/main/resources/codegen-resources/service-2.json index 68374320effc..6fa2a24199bb 100644 --- a/services/efs/src/main/resources/codegen-resources/service-2.json +++ b/services/efs/src/main/resources/codegen-resources/service-2.json @@ -23,6 +23,7 @@ "errors":[ {"shape":"BadRequest"}, {"shape":"AccessPointAlreadyExists"}, + {"shape":"IncorrectFileSystemLifeCycleState"}, {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"}, {"shape":"AccessPointLimitExceeded"} @@ -186,6 +187,24 @@ ], "documentation":"

Returns the description of a specific Amazon EFS access point if the AccessPointId is provided. If you provide an EFS FileSystemId, it returns descriptions of all access points for that file system. You can provide either an AccessPointId or a FileSystemId in the request, but not both.

This operation requires permissions for the elasticfilesystem:DescribeAccessPoints action.

" }, + "DescribeBackupPolicy":{ + "name":"DescribeBackupPolicy", + "http":{ + "method":"GET", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/backup-policy", + "responseCode":200 + }, + "input":{"shape":"DescribeBackupPolicyRequest"}, + "output":{"shape":"BackupPolicyDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"}, + {"shape":"PolicyNotFound"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the backup policy for the specified EFS file system.

" + }, "DescribeFileSystemPolicy":{ "name":"DescribeFileSystemPolicy", "http":{ @@ -322,6 +341,24 @@ ], "documentation":"

Modifies the set of security groups in effect for a mount target.

When you create a mount target, Amazon EFS also creates a new network interface. For more information, see CreateMountTarget. This operation replaces the security groups in effect for the network interface associated with a mount target, with the SecurityGroups provided in the request. This operation requires that the network interface of the mount target has been created and the lifecycle state of the mount target is not deleted.

The operation requires permissions for the following actions:

  • elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount target's file system.

  • ec2:ModifyNetworkInterfaceAttribute action on the mount target's network interface.

" }, + "PutBackupPolicy":{ + "name":"PutBackupPolicy", + "http":{ + "method":"PUT", + "requestUri":"/2015-02-01/file-systems/{FileSystemId}/backup-policy", + "responseCode":200 + }, + "input":{"shape":"PutBackupPolicyRequest"}, + "output":{"shape":"BackupPolicyDescription"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"IncorrectFileSystemLifeCycleState"}, + {"shape":"InternalServerError"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates the file system's backup policy. Use this action to start or stop automatic backups of the file system.

" + }, "PutFileSystemPolicy":{ "name":"PutFileSystemPolicy", "http":{ @@ -501,7 +538,31 @@ }, "AvailabilityZoneId":{"type":"string"}, "AvailabilityZoneName":{"type":"string"}, - "AwsAccountId":{"type":"string"}, + "AwsAccountId":{ + "type":"string", + "max":14, + "pattern":"^(\\d{12})|(\\d{4}-\\d{4}-\\d{4})$" + }, + "BackupPolicy":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

Describes the status of the file system's backup policy.

  • ENABLED - EFS is automatically backing up the file system.

  • ENABLING - EFS is turning on automatic backups for the file system.

  • DISABLED - automatic backups are turned off for the file system.

  • DISABLING - EFS is turning off automatic backups for the file system.

" + } + }, + "documentation":"

The backup policy for the file system, showing the current status. If ENABLED, the file system is being backed up.

" + }, + "BackupPolicyDescription":{ + "type":"structure", + "members":{ + "BackupPolicy":{ + "shape":"BackupPolicy", + "documentation":"

Describes the file system's backup policy, indicating whether automatic backups are turned on or off.

" + } + } + }, "BadRequest":{ "type":"structure", "required":["ErrorCode"], @@ -656,7 +717,8 @@ "CreationToken":{ "type":"string", "max":64, - "min":1 + "min":1, + "pattern":".+" }, "DeleteAccessPointRequest":{ "type":"structure", @@ -781,6 +843,18 @@ } } }, + "DescribeBackupPolicyRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies which EFS file system to retrieve the BackupPolicy for.

", + "location":"uri", + "locationName":"FileSystemId" + } + } + }, "DescribeFileSystemPolicyRequest":{ "type":"structure", "required":["FileSystemId"], @@ -994,6 +1068,7 @@ "error":{"httpStatusCode":409}, "exception":true }, + "FileSystemArn":{"type":"string"}, "FileSystemDescription":{ "type":"structure", "required":[ @@ -1020,6 +1095,10 @@ "shape":"FileSystemId", "documentation":"

The ID of the file system, assigned by Amazon EFS.

" }, + "FileSystemArn":{ + "shape":"FileSystemArn", + "documentation":"

The Amazon Resource Name (ARN) for the EFS file system, in the format arn:aws:elasticfilesystem:region:account-id:file-system/file-system-id . Example with sample data: arn:aws:elasticfilesystem:us-west-2:1111333322228888:file-system/fs-01234567

" + }, "CreationTime":{ "shape":"Timestamp", "documentation":"

The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).

" @@ -1071,7 +1150,11 @@ "type":"list", "member":{"shape":"FileSystemDescription"} }, - "FileSystemId":{"type":"string"}, + "FileSystemId":{ + "type":"string", + "max":128, + "pattern":"^(arn:aws[-a-z]*:elasticfilesystem:[0-9a-z-:]+:file-system/fs-[0-9a-f]{8,40}|fs-[0-9a-f]{8,40})$" + }, "FileSystemInUse":{ "type":"structure", "required":["ErrorCode"], @@ -1208,7 +1291,12 @@ "error":{"httpStatusCode":400}, "exception":true }, - "IpAddress":{"type":"string"}, + "IpAddress":{ + "type":"string", + "max":15, + "min":7, + "pattern":"^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$" + }, "IpAddressInUse":{ "type":"structure", "required":["ErrorCode"], @@ -1223,7 +1311,7 @@ "KmsKeyId":{ "type":"string", "max":2048, - "min":1 + "pattern":"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}|alias/[a-zA-Z0-9/_-]+|(arn:aws[-a-z]*:kms:[a-z0-9-]+:\\d{12}:((key/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})|(alias/[a-zA-Z0-9/_-]+))))$" }, "LifeCycleState":{ "type":"string", @@ -1295,7 +1383,12 @@ } } }, - "Marker":{"type":"string"}, + "Marker":{ + "type":"string", + "max":128, + "min":1, + "pattern":".+" + }, "MaxItems":{ "type":"integer", "min":1 @@ -1380,6 +1473,10 @@ "AvailabilityZoneName":{ "shape":"AvailabilityZoneName", "documentation":"

The name of the Availability Zone (AZ) that the mount target resides in. AZs are independently mapped to names for each AWS account. For example, the Availability Zone us-east-1a for your AWS account might not be the same location as us-east-1a for another AWS account.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The Virtual Private Cloud (VPC) ID that the mount target is configured in.

" } }, "documentation":"

Provides a description of a mount target.

" @@ -1388,7 +1485,12 @@ "type":"list", "member":{"shape":"MountTargetDescription"} }, - "MountTargetId":{"type":"string"}, + "MountTargetId":{ + "type":"string", + "max":45, + "min":13, + "pattern":"^fsmt-[0-9a-f]{8,40}$" + }, "MountTargetNotFound":{ "type":"structure", "required":["ErrorCode"], @@ -1487,6 +1589,25 @@ "type":"double", "min":1.0 }, + "PutBackupPolicyRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "BackupPolicy" + ], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Specifies which EFS file system to update the backup policy for.

", + "location":"uri", + "locationName":"FileSystemId" + }, + "BackupPolicy":{ + "shape":"BackupPolicy", + "documentation":"

The backup policy included in the PutBackupPolicy request.

" + } + } + }, "PutFileSystemPolicyRequest":{ "type":"structure", "required":[ @@ -1550,7 +1671,12 @@ "max":16, "min":0 }, - "SecurityGroup":{"type":"string"}, + "SecurityGroup":{ + "type":"string", + "max":43, + "min":11, + "pattern":"^sg-[0-9a-f]{8,40}" + }, "SecurityGroupLimitExceeded":{ "type":"structure", "required":["ErrorCode"], @@ -1578,7 +1704,21 @@ "member":{"shape":"SecurityGroup"}, "max":5 }, - "SubnetId":{"type":"string"}, + "Status":{ + "type":"string", + "enum":[ + "ENABLED", + "ENABLING", + "DISABLED", + "DISABLING" + ] + }, + "SubnetId":{ + "type":"string", + "max":47, + "min":15, + "pattern":"^subnet-[0-9a-f]{8,40}$" + }, "SubnetNotFound":{ "type":"structure", "required":["ErrorCode"], @@ -1611,7 +1751,8 @@ "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$" }, "TagKeys":{ "type":"list", @@ -1640,7 +1781,8 @@ }, "TagValue":{ "type":"string", - "max":256 + "max":256, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "Tags":{ "type":"list", @@ -1705,7 +1847,10 @@ }, "UntagResourceRequest":{ "type":"structure", - "required":["ResourceId"], + "required":[ + "ResourceId", + "TagKeys" + ], "members":{ "ResourceId":{ "shape":"ResourceId", @@ -1715,7 +1860,9 @@ }, "TagKeys":{ "shape":"TagKeys", - "documentation":"

The keys of the key:value tag pairs that you want to remove from the specified EFS resource.

" + "documentation":"

The keys of the key:value tag pairs that you want to remove from the specified EFS resource.

", + "location":"querystring", + "locationName":"tagKeys" } } }, @@ -1738,7 +1885,19 @@ "documentation":"

(Optional) The amount of throughput, in MiB/s, that you want to provision for your file system. Valid values are 1-1024. Required if ThroughputMode is changed to provisioned on update. If you're not updating the amount of provisioned throughput for your file system, you don't need to provide this value in your request.

" } } - } + }, + "ValidationException":{ + "type":"structure", + "required":["ErrorCode"], + "members":{ + "ErrorCode":{"shape":"ErrorCode"}, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Returned if the AWS Backup service is not available in the region that the request was made.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VpcId":{"type":"string"} }, "documentation":"Amazon Elastic File System

Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so your applications have the storage they need, when they need it. For more information, see the User Guide.

" } diff --git a/services/eks/pom.xml b/services/eks/pom.xml index e1ce0747770f..0b6c3d8ae596 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index ebe256ba980c..a65020929dca 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticache/src/main/resources/codegen-resources/service-2.json b/services/elasticache/src/main/resources/codegen-resources/service-2.json index 9ac469cc78a8..9f59d846d4ff 100644 --- a/services/elasticache/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticache/src/main/resources/codegen-resources/service-2.json @@ -228,7 +228,7 @@ {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

  • The GlobalReplicationGroupId is the name of the Global Datastore.

  • The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.

" + "documentation":"

Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis, you can create cross-region read replica clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

  • The GlobalReplicationGroupIdSuffix is the name of the Global Datastore.

  • The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.

" }, "CreateReplicationGroup":{ "name":"CreateReplicationGroup", @@ -260,7 +260,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide.

This operation is valid for Redis only.

" + "documentation":"

Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global Datastore.

A Redis (cluster mode disabled) replication group is a collection of clusters, where one of the clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis (cluster mode enabled) replication group is a collection of 1 to 90 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).

When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling ElastiCache for Redis Clusters in the ElastiCache User Guide.

This operation is valid for Redis only.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -413,7 +413,7 @@ {"shape":"InvalidGlobalReplicationGroupStateFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Deleting a Global Datastore is a two-step process:

  • First, you must DisassociateGlobalReplicationGroup to remove the secondary clusters in the Global Datastore.

  • Once the Global Datastore contains only the primary cluster, you can use DeleteGlobalReplicationGroup API to delete the Global Datastore while retainining the primary cluster using Retain…= true.

Since the Global Datastore has only a primary cluster, you can delete the Global Datastore while retaining the primary by setting RetainPrimaryCluster=true.

When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.

This operation is valid for Redis only.

" + "documentation":"

Deleting a Global Datastore is a two-step process:

  • First, you must DisassociateGlobalReplicationGroup to remove the secondary clusters in the Global Datastore.

  • Once the Global Datastore contains only the primary cluster, you can use the DeleteGlobalReplicationGroup API to delete the Global Datastore while retaining the primary cluster using RetainPrimaryCluster=true.

Since the Global Datastore has only a primary cluster, you can delete the Global Datastore while retaining the primary by setting RetainPrimaryCluster=true.

When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the selected resources; you cannot cancel or revert this operation.

" }, "DeleteReplicationGroup":{ "name":"DeleteReplicationGroup", @@ -753,7 +753,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Used to failover the primary region to a selected secondary region.

" + "documentation":"

Used to failover the primary region to a selected secondary region. The selected secondary region will become primary, and all other clusters will become secondary.

" }, "IncreaseNodeGroupsInGlobalReplicationGroup":{ "name":"IncreaseNodeGroupsInGlobalReplicationGroup", @@ -947,7 +947,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Modifies the settings for a replication group.

For Redis (cluster mode enabled) clusters, this operation cannot be used to change a cluster's node type or engine version. For more information, see:

This operation is valid for Redis only.

" + "documentation":"

Modifies the settings for a replication group.

This operation is valid for Redis only.

" }, "ModifyReplicationGroupShardConfiguration":{ "name":"ModifyReplicationGroupShardConfiguration", @@ -1010,7 +1010,7 @@ {"shape":"InvalidGlobalReplicationGroupStateFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Redistribute slots to ensure unifirom distribution across existing shards in the cluster.

" + "documentation":"

Redistribute slots to ensure uniform distribution across existing shards in the cluster.

" }, "RebootCacheCluster":{ "name":"RebootCacheCluster", @@ -1129,7 +1129,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"

Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from master node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from master node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ with Automatic Failover in the ElastiCache User Guide.

" + "documentation":"

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and AWS CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from master node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from master node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" } }, "shapes":{ @@ -1426,6 +1426,10 @@ "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", "documentation":"

A flag that enables encryption at-rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

Default: false

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the cache cluster.

" } }, "documentation":"

Contains all of the attributes of a specific cluster.

", @@ -1709,6 +1713,10 @@ "IsGlobal":{ "shape":"Boolean", "documentation":"

Indicates whether the parameter group is associated with a Global Datastore

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the cache parameter group.

" } }, "documentation":"

Represents the output of a CreateCacheParameterGroup operation.

", @@ -1835,6 +1843,10 @@ "EC2SecurityGroups":{ "shape":"EC2SecurityGroupList", "documentation":"

A list of Amazon EC2 security groups that are associated with this cache security group.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the cache security group.

" } }, "documentation":"

Represents the output of one of the following operations:

  • AuthorizeCacheSecurityGroupIngress

  • CreateCacheSecurityGroup

  • RevokeCacheSecurityGroupIngress

", @@ -1943,6 +1955,10 @@ "Subnets":{ "shape":"SubnetList", "documentation":"

A list of subnets associated with the cache subnet group.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the cache subnet group.

" } }, "documentation":"

Represents the output of one of the following operations:

  • CreateCacheSubnetGroup

  • ModifyCacheSubnetGroup

", @@ -2088,7 +2104,7 @@ }, "NewReplicaCount":{ "shape":"Integer", - "documentation":"

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis replication group you are working with.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ with Automatic Failover is enabled: 1

    • If Multi-AZ with Automatic Failover is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + "documentation":"

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis replication group you are working with.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ is enabled: 1

    • If Multi-AZ is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" }, "PreferredAvailabilityZones":{ "shape":"PreferredAvailabilityZoneList", @@ -2147,7 +2163,7 @@ }, "PreferredAvailabilityZone":{ "shape":"String", - "documentation":"

The EC2 Availability Zone in which the cluster is created.

All nodes belonging to this Memcached cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

Default: System chosen Availability Zone.

" + "documentation":"

The EC2 Availability Zone in which the cluster is created.

All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones.

Default: System chosen Availability Zone.

" }, "PreferredAvailabilityZones":{ "shape":"PreferredAvailabilityZoneList", @@ -2325,7 +2341,7 @@ "members":{ "GlobalReplicationGroupIdSuffix":{ "shape":"String", - "documentation":"

The suffix for name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.

" + "documentation":"

The suffix name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.

" }, "GlobalReplicationGroupDescription":{ "shape":"String", @@ -2368,7 +2384,11 @@ }, "AutomaticFailoverEnabled":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

  • Redis versions earlier than 2.8.6.

  • Redis (cluster mode disabled): T1 node types.

  • Redis (cluster mode enabled): T1 node types.

" + "documentation":"

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.

Default: false

" + }, + "MultiAZEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.

" }, "NumCacheClusters":{ "shape":"IntegerOptional", @@ -2388,7 +2408,7 @@ }, "NodeGroupConfiguration":{ "shape":"NodeGroupConfigurationList", - "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file, you must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" + "documentation":"

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis (cluster mode enabled) cluster from an S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" }, "CacheNodeType":{ "shape":"String", @@ -2575,7 +2595,7 @@ }, "NewReplicaCount":{ "shape":"IntegerOptional", - "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ with Automatic Failover is enabled: 1

    • If Multi-AZ with Automatic Failover is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" + "documentation":"

The number of read replica nodes you want at the completion of this operation. For Redis (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis (cluster mode disabled)

    • If Multi-AZ is enabled: 1

    • If Multi-AZ is not enabled: 0

  • Redis (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)

" }, "ReplicaConfiguration":{ "shape":"ReplicaConfigurationList", @@ -2664,7 +2684,7 @@ }, "RetainPrimaryReplicationGroup":{ "shape":"Boolean", - "documentation":"

If set to true, the primary replication is retained as a standalone replication group.

" + "documentation":"

The primary replication group is retained as a standalone replication group.

" } } }, @@ -3357,7 +3377,7 @@ }, "Engine":{ "shape":"String", - "documentation":"

The Elasticache engine. For preview, it is Redis only.

" + "documentation":"

The Elasticache engine. For Redis only.

" }, "EngineVersion":{ "shape":"String", @@ -3386,9 +3406,13 @@ "AtRestEncryptionEnabled":{ "shape":"BooleanOptional", "documentation":"

A flag that enables encryption at rest when set to true.

You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group.

Required: Only available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x or later.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the global replication group.

" } }, - "documentation":"

Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary cluster automatically replicates updates to the secondary cluster.

  • The GlobalReplicationGroupId represents the name of the Global Datastore, which is what you use to associate a secondary cluster.

", + "documentation":"

Consists of a primary cluster that accepts writes and an associated secondary cluster that resides in a different AWS region. The secondary cluster accepts only reads. The primary cluster automatically replicates updates to the secondary cluster.

  • The GlobalReplicationGroupIdSuffix represents the name of the Global Datastore, which is what you use to associate a secondary cluster.

", "wrapper":true }, "GlobalReplicationGroupAlreadyExistsFault":{ @@ -3599,7 +3623,7 @@ "type":"structure", "members":{ }, - "documentation":"

The Global Datastore is not available

", + "documentation":"

The Global Datastore is not available or in primary-only state.

", "error":{ "code":"InvalidGlobalReplicationGroupState", "httpStatusCode":400, @@ -3875,7 +3899,7 @@ }, "ApplyImmediately":{ "shape":"Boolean", - "documentation":"

If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false, changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.

" + "documentation":"

This parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible. Modifications to Global Replication Groups cannot be requested to be applied in PreferredMaintenanceWindow.

" }, "CacheNodeType":{ "shape":"String", @@ -3923,7 +3947,11 @@ }, "AutomaticFailoverEnabled":{ "shape":"BooleanOptional", - "documentation":"

Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.

Valid values: true | false

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

  • Redis versions earlier than 2.8.6.

  • Redis (cluster mode disabled): T1 node types.

  • Redis (cluster mode enabled): T1 node types.

" + "documentation":"

Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.

Valid values: true | false

" + }, + "MultiAZEnabled":{ + "shape":"BooleanOptional", + "documentation":"

A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.

" }, "NodeGroupId":{ "shape":"String", @@ -4036,6 +4064,13 @@ "ReplicationGroup":{"shape":"ReplicationGroup"} } }, + "MultiAZStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, "NoOperationFault":{ "type":"structure", "members":{ @@ -4057,7 +4092,7 @@ }, "Status":{ "shape":"String", - "documentation":"

The current state of this replication group - creating, available, etc.

" + "documentation":"

The current state of this replication group - creating, available, modifying, deleting.

" }, "PrimaryEndpoint":{ "shape":"Endpoint", @@ -4690,7 +4725,11 @@ }, "AutomaticFailover":{ "shape":"AutomaticFailoverStatus", - "documentation":"

Indicates the status of Multi-AZ with automatic failover for this Redis replication group.

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

  • Redis versions earlier than 2.8.6.

  • Redis (cluster mode disabled): T1 node types.

  • Redis (cluster mode enabled): T1 node types.

" + "documentation":"

Indicates the status of automatic failover for this Redis replication group.

" + }, + "MultiAZ":{ + "shape":"MultiAZStatus", + "documentation":"

A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ

" }, "ConfigurationEndpoint":{ "shape":"Endpoint", @@ -4731,6 +4770,10 @@ "KmsKeyId":{ "shape":"String", "documentation":"

The ID of the KMS key used to encrypt the disk in the cluster.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the replication group.

" } }, "documentation":"

Contains all of the attributes of a specific Redis replication group.

", @@ -4819,7 +4862,7 @@ }, "AutomaticFailoverStatus":{ "shape":"PendingAutomaticFailoverStatus", - "documentation":"

Indicates the status of Multi-AZ with automatic failover for this Redis replication group.

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

  • Redis versions earlier than 2.8.6.

  • Redis (cluster mode disabled): T1 node types.

  • Redis (cluster mode enabled): T1 node types.

" + "documentation":"

Indicates the status of automatic failover for this Redis replication group.

" }, "Resharding":{ "shape":"ReshardingStatus", @@ -5359,7 +5402,7 @@ }, "AutomaticFailover":{ "shape":"AutomaticFailoverStatus", - "documentation":"

Indicates the status of Multi-AZ with automatic failover for the source Redis replication group.

Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover on:

  • Redis versions earlier than 2.8.6.

  • Redis (cluster mode disabled): T1 node types.

  • Redis (cluster mode enabled): T1 node types.

" + "documentation":"

Indicates the status of automatic failover for the source Redis replication group.

" }, "NodeSnapshots":{ "shape":"NodeSnapshotList", @@ -5368,6 +5411,10 @@ "KmsKeyId":{ "shape":"String", "documentation":"

The ID of the KMS key used to encrypt the snapshot.

" + }, + "ARN":{ + "shape":"String", + "documentation":"

The ARN (Amazon Resource Name) of the snapshot.

" } }, "documentation":"

Represents a copy of an entire Redis cluster as of the time when the snapshot was taken.

", @@ -5749,13 +5796,16 @@ "in-progress", "stopping", "stopped", - "complete" + "complete", + "scheduling", + "scheduled", + "not-applicable" ] }, "UpdateActionStatusList":{ "type":"list", "member":{"shape":"UpdateActionStatus"}, - "max":6 + "max":9 }, "UpdateActionsMessage":{ "type":"structure", diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 923f6857cc1a..3bfe3998cfd3 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/paginators-1.json b/services/elasticbeanstalk/src/main/resources/codegen-resources/paginators-1.json index b4e93b3d8cb1..874292e01c66 100755 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/paginators-1.json +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/paginators-1.json @@ -20,6 +20,11 @@ }, "ListAvailableSolutionStacks": { "result_key": "SolutionStacks" + }, + "ListPlatformBranches": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken" } } } \ No newline at end of file diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json b/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json index b66e5eab2f14..249e5a1cc288 100755 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/service-2.json @@ -41,6 +41,18 @@ ], "documentation":"

Applies a scheduled managed action immediately. A managed action can be applied only if its status is Scheduled. Get the status and action ID of a managed action with DescribeEnvironmentManagedActions.

" }, + "AssociateEnvironmentOperationsRole":{ + "name":"AssociateEnvironmentOperationsRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateEnvironmentOperationsRoleMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ], + "documentation":"

Add or change the operations role used by an environment. After this call is made, Elastic Beanstalk uses the associated operations role for permissions to downstream services during subsequent calls acting on this environment. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" + }, "CheckDNSAvailability":{ "name":"CheckDNSAvailability", "http":{ @@ -85,7 +97,7 @@ "errors":[ {"shape":"TooManyApplicationsException"} ], - "documentation":"

Creates an application that has one configuration template named default and no application versions.

" + "documentation":"

Creates an application that has one configuration template named default and no application versions.

" }, "CreateApplicationVersion":{ "name":"CreateApplicationVersion", @@ -105,7 +117,7 @@ {"shape":"S3LocationNotInServiceRegionException"}, {"shape":"CodeBuildNotInServiceRegionException"} ], - "documentation":"

Creates an application version for the specified application. You can create an application version from a source bundle in Amazon S3, a commit in AWS CodeCommit, or the output of an AWS CodeBuild build as follows:

Specify a commit in an AWS CodeCommit repository with SourceBuildInformation.

Specify a build in an AWS CodeBuild with SourceBuildInformation and BuildConfiguration.

Specify a source bundle in S3 with SourceBundle

Omit both SourceBuildInformation and SourceBundle to use the default sample application.

Once you create an application version with a specified Amazon S3 bucket and key location, you cannot change that Amazon S3 location. If you change the Amazon S3 location, you receive an exception when you attempt to launch an environment from the application version.

" + "documentation":"

Creates an application version for the specified application. You can create an application version from a source bundle in Amazon S3, a commit in AWS CodeCommit, or the output of an AWS CodeBuild build as follows:

Specify a commit in an AWS CodeCommit repository with SourceBuildInformation.

Specify a build in an AWS CodeBuild with SourceBuildInformation and BuildConfiguration.

Specify a source bundle in S3 with SourceBundle

Omit both SourceBuildInformation and SourceBundle to use the default sample application.

After you create an application version with a specified Amazon S3 bucket and key location, you can't change that Amazon S3 location. If you change the Amazon S3 location, you receive an exception when you attempt to launch an environment from the application version.

" }, "CreateConfigurationTemplate":{ "name":"CreateConfigurationTemplate", @@ -123,7 +135,7 @@ {"shape":"TooManyBucketsException"}, {"shape":"TooManyConfigurationTemplatesException"} ], - "documentation":"

Creates a configuration template. Templates are associated with a specific application and are used to deploy different versions of the application with the same configuration settings.

Templates aren't associated with any environment. The EnvironmentName response element is always null.

Related Topics

" + "documentation":"

Creates an AWS Elastic Beanstalk configuration template, associated with a specific Elastic Beanstalk application. You define application configuration settings in a configuration template. You can then use the configuration template to deploy different versions of the application with the same configuration settings.

Templates aren't associated with any environment. The EnvironmentName response element is always null.

Related Topics

" }, "CreateEnvironment":{ "name":"CreateEnvironment", @@ -140,7 +152,7 @@ {"shape":"TooManyEnvironmentsException"}, {"shape":"InsufficientPrivilegesException"} ], - "documentation":"

Launches an environment for the specified application using the specified configuration.

" + "documentation":"

Launches an AWS Elastic Beanstalk environment for the specified application using the specified configuration.

" }, "CreatePlatformVersion":{ "name":"CreatePlatformVersion", @@ -440,7 +452,19 @@ {"shape":"InsufficientPrivilegesException"}, {"shape":"ElasticBeanstalkServiceException"} ], - "documentation":"

Describes the version of the platform.

" + "documentation":"

Describes a platform version. Provides full details. Compare to ListPlatformVersions, which provides summary information about a list of platform versions.

For definitions of platform version and other platform-related terms, see AWS Elastic Beanstalk Platforms Glossary.

" + }, + "DisassociateEnvironmentOperationsRole":{ + "name":"DisassociateEnvironmentOperationsRole", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateEnvironmentOperationsRoleMessage"}, + "errors":[ + {"shape":"InsufficientPrivilegesException"} + ], + "documentation":"

Disassociate the operations role from an environment. After this call is made, Elastic Beanstalk uses the caller's permissions for permissions to downstream services during subsequent calls acting on this environment. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" }, "ListAvailableSolutionStacks":{ "name":"ListAvailableSolutionStacks", @@ -454,6 +478,19 @@ }, "documentation":"

Returns a list of the available solution stack names, with the public version first and then in reverse chronological order.

" }, + "ListPlatformBranches":{ + "name":"ListPlatformBranches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPlatformBranchesRequest"}, + "output":{ + "shape":"ListPlatformBranchesResult", + "resultWrapper":"ListPlatformBranchesResult" + }, + "documentation":"

Lists the platform branches available for your account in an AWS Region. Provides summary information about each platform branch.

For definitions of platform branch and other platform-related terms, see AWS Elastic Beanstalk Platforms Glossary.

" + }, "ListPlatformVersions":{ "name":"ListPlatformVersions", "http":{ @@ -469,7 +506,7 @@ {"shape":"InsufficientPrivilegesException"}, {"shape":"ElasticBeanstalkServiceException"} ], - "documentation":"

Lists the available platforms.

" + "documentation":"

Lists the platform versions available for your account in an AWS Region. Provides summary information about each platform version. Compare to DescribePlatformVersion, which provides full details about a single platform version.

For definitions of platform version and other platform-related terms, see AWS Elastic Beanstalk Platforms Glossary.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -487,7 +524,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceTypeNotSupportedException"} ], - "documentation":"

Returns the tags applied to an AWS Elastic Beanstalk resource. The response contains a list of tag key-value pairs.

Currently, Elastic Beanstalk only supports tagging of Elastic Beanstalk environments. For details about environment tagging, see Tagging Resources in Your Elastic Beanstalk Environment.

" + "documentation":"

Return the tags applied to an AWS Elastic Beanstalk resource. The response contains a list of tag key-value pairs.

Elastic Beanstalk supports tagging of all of its resources. For details about resource tagging, see Tagging Application Resources.

" }, "RebuildEnvironment":{ "name":"RebuildEnvironment", @@ -647,7 +684,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceTypeNotSupportedException"} ], - "documentation":"

Update the list of tags applied to an AWS Elastic Beanstalk resource. Two lists can be passed: TagsToAdd for tags to add or update, and TagsToRemove.

Currently, Elastic Beanstalk only supports tagging of Elastic Beanstalk environments. For details about environment tagging, see Tagging Resources in Your Elastic Beanstalk Environment.

If you create a custom IAM user policy to control permission to this operation, specify one of the following two virtual actions (or both) instead of the API operation name:

elasticbeanstalk:AddTags

Controls permission to call UpdateTagsForResource and pass a list of tags to add in the TagsToAdd parameter.

elasticbeanstalk:RemoveTags

Controls permission to call UpdateTagsForResource and pass a list of tag keys to remove in the TagsToRemove parameter.

For details about creating a custom user policy, see Creating a Custom User Policy.

" + "documentation":"

Update the list of tags applied to an AWS Elastic Beanstalk resource. Two lists can be passed: TagsToAdd for tags to add or update, and TagsToRemove.

Elastic Beanstalk supports tagging of all of its resources. For details about resource tagging, see Tagging Application Resources.

If you create a custom IAM user policy to control permission to this operation, specify one of the following two virtual actions (or both) instead of the API operation name:

elasticbeanstalk:AddTags

Controls permission to call UpdateTagsForResource and pass a list of tags to add in the TagsToAdd parameter.

elasticbeanstalk:RemoveTags

Controls permission to call UpdateTagsForResource and pass a list of tag keys to remove in the TagsToRemove parameter.

For details about creating a custom user policy, see Creating a Custom User Policy.

" }, "ValidateConfigurationSettings":{ "name":"ValidateConfigurationSettings", @@ -812,10 +849,10 @@ }, "VersionLifecycleConfig":{ "shape":"ApplicationVersionLifecycleConfig", - "documentation":"

The application version lifecycle configuration.

" + "documentation":"

Defines lifecycle settings for application versions.

" } }, - "documentation":"

The resource lifecycle configuration for an application. Defines lifecycle settings for resources that belong to the application, and the service role that Elastic Beanstalk assumes in order to apply lifecycle settings. The version lifecycle configuration defines lifecycle settings for application versions.

" + "documentation":"

The resource lifecycle configuration for an application. Defines lifecycle settings for resources that belong to the application, and the service role that AWS Elastic Beanstalk assumes in order to apply lifecycle settings. The version lifecycle configuration defines lifecycle settings for application versions.

" }, "ApplicationResourceLifecycleDescriptionMessage":{ "type":"structure", @@ -971,6 +1008,24 @@ }, "documentation":"

The result message containing information about the managed action.

" }, + "AssociateEnvironmentOperationsRoleMessage":{ + "type":"structure", + "required":[ + "EnvironmentName", + "OperationsRole" + ], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment to which to set the operations role.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role.

" + } + }, + "documentation":"

Request to add or change the operations role used by an environment.

" + }, "AutoCreateApplication":{"type":"boolean"}, "AutoScalingGroup":{ "type":"structure", @@ -996,6 +1051,8 @@ }, "BoxedBoolean":{"type":"boolean"}, "BoxedInt":{"type":"integer"}, + "BranchName":{"type":"string"}, + "BranchOrder":{"type":"integer"}, "BuildConfiguration":{ "type":"structure", "required":[ @@ -1221,11 +1278,11 @@ "members":{ "ResourceName":{ "shape":"ResourceName", - "documentation":"

A unique resource name for a time-based scaling configuration option.

" + "documentation":"

A unique resource name for the option setting. Use it for a time–based scaling configuration option.

" }, "Namespace":{ "shape":"OptionNamespace", - "documentation":"

A unique namespace identifying the option's associated AWS resource.

" + "documentation":"

A unique namespace that identifies the option's associated AWS resource.

" }, "OptionName":{ "shape":"ConfigurationOptionName", @@ -1236,7 +1293,7 @@ "documentation":"

The current value for the configuration option.

" } }, - "documentation":"

A specification identifying an individual configuration option along with its current value. For a list of possible option values, go to Option Values in the AWS Elastic Beanstalk Developer Guide.

" + "documentation":"

A specification identifying an individual configuration option along with its current value. For a list of possible namespaces and option values, see Option Values in the AWS Elastic Beanstalk Developer Guide.

" }, "ConfigurationOptionSettingsList":{ "type":"list", @@ -1260,7 +1317,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "Options":{ "shape":"ConfigurationOptionDescriptionsList", @@ -1278,7 +1335,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "ApplicationName":{ "shape":"ApplicationName", @@ -1354,15 +1411,15 @@ "members":{ "ApplicationName":{ "shape":"ApplicationName", - "documentation":"

The name of the application.

Constraint: This name must be unique within your account. If the specified name already exists, the action returns an InvalidParameterValue error.

" + "documentation":"

The name of the application. Must be unique within your account.

" }, "Description":{ "shape":"Description", - "documentation":"

Describes the application.

" + "documentation":"

Your description of the application.

" }, "ResourceLifecycleConfig":{ "shape":"ApplicationResourceLifecycleConfig", - "documentation":"

Specify an application resource lifecycle configuration to prevent your application from accumulating too many versions.

" + "documentation":"

Specifies an application resource lifecycle configuration to prevent your application from accumulating too many versions.

" }, "Tags":{ "shape":"Tags", @@ -1388,7 +1445,7 @@ }, "Description":{ "shape":"Description", - "documentation":"

Describes this version.

" + "documentation":"

A description of this application version.

" }, "SourceBuildInformation":{ "shape":"SourceBuildInformation", @@ -1426,35 +1483,35 @@ "members":{ "ApplicationName":{ "shape":"ApplicationName", - "documentation":"

The name of the application to associate with this configuration template. If no application is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

" + "documentation":"

The name of the Elastic Beanstalk application to associate with this configuration template.

" }, "TemplateName":{ "shape":"ConfigurationTemplateName", - "documentation":"

The name of the configuration template.

Constraint: This name must be unique per application.

Default: If a configuration template already exists with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

" + "documentation":"

The name of the configuration template.

Constraint: This name must be unique per application.

" }, "SolutionStackName":{ "shape":"SolutionStackName", - "documentation":"

The name of the solution stack used by this configuration. The solution stack specifies the operating system, architecture, and application server for a configuration template. It determines the set of configuration options as well as the possible and default values.

Use ListAvailableSolutionStacks to obtain a list of available solution stacks.

A solution stack name or a source configuration parameter must be specified, otherwise AWS Elastic Beanstalk returns an InvalidParameterValue error.

If a solution stack name is not specified and the source configuration parameter is specified, AWS Elastic Beanstalk uses the same solution stack as the source configuration template.

" + "documentation":"

The name of an Elastic Beanstalk solution stack (platform version) that this configuration uses. For example, 64bit Amazon Linux 2013.09 running Tomcat 7 Java 7. A solution stack specifies the operating system, runtime, and application server for a configuration template. It also determines the set of configuration options as well as the possible and default values. For more information, see Supported Platforms in the AWS Elastic Beanstalk Developer Guide.

You must specify SolutionStackName if you don't specify PlatformArn, EnvironmentId, or SourceConfiguration.

Use the ListAvailableSolutionStacks API to obtain a list of available solution stacks.

" }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the custom platform.

" + "documentation":"

The Amazon Resource Name (ARN) of the custom platform. For more information, see Custom Platforms in the AWS Elastic Beanstalk Developer Guide.

If you specify PlatformArn, then don't specify SolutionStackName.

" }, "SourceConfiguration":{ "shape":"SourceConfiguration", - "documentation":"

If specified, AWS Elastic Beanstalk uses the configuration values from the specified configuration template to create a new configuration.

Values specified in the OptionSettings parameter of this call overrides any values obtained from the SourceConfiguration.

If no configuration template is found, returns an InvalidParameterValue error.

Constraint: If both the solution stack name parameter and the source configuration parameters are specified, the solution stack of the source configuration template must match the specified solution stack name or else AWS Elastic Beanstalk returns an InvalidParameterCombination error.

" + "documentation":"

An Elastic Beanstalk configuration template to base this one on. If specified, Elastic Beanstalk uses the configuration values from the specified configuration template to create a new configuration.

Values specified in OptionSettings override any values obtained from the SourceConfiguration.

You must specify SourceConfiguration if you don't specify PlatformArn, EnvironmentId, or SolutionStackName.

Constraint: If both solution stack name and source configuration are specified, the solution stack of the source configuration template must match the specified solution stack name.

" }, "EnvironmentId":{ "shape":"EnvironmentId", - "documentation":"

The ID of the environment used with this configuration template.

" + "documentation":"

The ID of an environment whose settings you want to use to create the configuration template. You must specify EnvironmentId if you don't specify PlatformArn, SolutionStackName, or SourceConfiguration.

" }, "Description":{ "shape":"Description", - "documentation":"

Describes this configuration.

" + "documentation":"

An optional description for this configuration.

" }, "OptionSettings":{ "shape":"ConfigurationOptionSettingsList", - "documentation":"

If specified, AWS Elastic Beanstalk sets the specified configuration option to the requested value. The new value overrides the value obtained from the solution stack or the source configuration template.

" + "documentation":"

Option values for the Elastic Beanstalk configuration, such as the instance type. If specified, these values override the values obtained from the solution stack or the source configuration template. For a complete list of Elastic Beanstalk configuration options, see Option Values in the AWS Elastic Beanstalk Developer Guide.

" }, "Tags":{ "shape":"Tags", @@ -1469,11 +1526,11 @@ "members":{ "ApplicationName":{ "shape":"ApplicationName", - "documentation":"

The name of the application that contains the version to be deployed.

If no application is found with this name, CreateEnvironment returns an InvalidParameterValue error.

" + "documentation":"

The name of the application that is associated with this environment.

" }, "EnvironmentName":{ "shape":"EnvironmentName", - "documentation":"

A unique name for the deployment environment. Used in the application URL.

Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It cannot start or end with a hyphen. This name must be unique within a region in your account. If the specified name already exists in the region, AWS Elastic Beanstalk returns an InvalidParameterValue error.

Default: If the CNAME parameter is not specified, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

" + "documentation":"

A unique name for the environment.

Constraint: Must be from 4 to 40 characters in length. The name can contain only letters, numbers, and hyphens. It can't start or end with a hyphen. This name must be unique within a region in your account. If the specified name already exists in the region, Elastic Beanstalk returns an InvalidParameterValue error.

If you don't specify the CNAMEPrefix parameter, the environment name becomes part of the CNAME, and therefore part of the visible URL for your application.

" }, "GroupName":{ "shape":"GroupName", @@ -1481,15 +1538,15 @@ }, "Description":{ "shape":"Description", - "documentation":"

Describes this environment.

" + "documentation":"

Your description for this environment.

" }, "CNAMEPrefix":{ "shape":"DNSCnamePrefix", - "documentation":"

If specified, the environment attempts to use this value as the prefix for the CNAME. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.

" + "documentation":"

If specified, the environment attempts to use this value as the prefix for the CNAME in your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.

" }, "Tier":{ "shape":"EnvironmentTier", - "documentation":"

This specifies the tier to use for creating this environment.

" + "documentation":"

Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles background-processing tasks.

" }, "Tags":{ "shape":"Tags", @@ -1497,19 +1554,19 @@ }, "VersionLabel":{ "shape":"VersionLabel", - "documentation":"

The name of the application version to deploy.

If the specified application has no associated application versions, AWS Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error.

Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample application in the container.

" + "documentation":"

The name of the application version to deploy.

Default: If not specified, Elastic Beanstalk attempts to deploy the sample application.

" }, "TemplateName":{ "shape":"ConfigurationTemplateName", - "documentation":"

The name of the configuration template to use in deployment. If no configuration template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue error.

" + "documentation":"

The name of the Elastic Beanstalk configuration template to use with the environment.

If you specify TemplateName, then don't specify SolutionStackName.

" }, "SolutionStackName":{ "shape":"SolutionStackName", - "documentation":"

This is an alternative to specifying a template name. If specified, AWS Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack.

For a list of current solution stacks, see Elastic Beanstalk Supported Platforms.

" + "documentation":"

The name of an Elastic Beanstalk solution stack (platform version) to use with the environment. If specified, Elastic Beanstalk sets the configuration values to the default values associated with the specified solution stack. For a list of current solution stacks, see Elastic Beanstalk Supported Platforms in the AWS Elastic Beanstalk Platforms guide.

If you specify SolutionStackName, don't specify PlatformArn or TemplateName.

" }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The Amazon Resource Name (ARN) of the custom platform to use with the environment. For more information, see Custom Platforms in the AWS Elastic Beanstalk Developer Guide.

If you specify PlatformArn, don't specify SolutionStackName.

" }, "OptionSettings":{ "shape":"ConfigurationOptionSettingsList", @@ -1518,6 +1575,10 @@ "OptionsToRemove":{ "shape":"OptionsSpecifierList", "documentation":"

A list of custom user-defined configuration options to remove from the configuration set for this new environment.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role. If specified, Elastic Beanstalk uses the operations role for permissions to downstream services during this call and during subsequent calls acting on this environment. To specify an operations role, you must have the iam:PassRole permission for the role. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" } }, "documentation":"

" @@ -2011,7 +2072,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the version of the custom platform.

" + "documentation":"

The ARN of a custom platform version. If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this custom platform version.

" }, "RequestId":{ "shape":"RequestId", @@ -2085,7 +2146,7 @@ "members":{ "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the version of the platform.

" + "documentation":"

The ARN of the platform version.

" } } }, @@ -2094,7 +2155,7 @@ "members":{ "PlatformDescription":{ "shape":"PlatformDescription", - "documentation":"

Detailed information about the version of the platform.

" + "documentation":"

Detailed information about the platform version.

" } } }, @@ -2102,6 +2163,17 @@ "type":"string", "max":200 }, + "DisassociateEnvironmentOperationsRoleMessage":{ + "type":"structure", + "required":["EnvironmentName"], + "members":{ + "EnvironmentName":{ + "shape":"EnvironmentName", + "documentation":"

The name of the environment from which to disassociate the operations role.

" + } + }, + "documentation":"

Request to disassociate the operations role from an environment.

" + }, "Ec2InstanceId":{"type":"string"}, "ElasticBeanstalkServiceException":{ "type":"structure", @@ -2141,7 +2213,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "TemplateName":{ "shape":"ConfigurationTemplateName", @@ -2198,6 +2270,10 @@ "EnvironmentArn":{ "shape":"EnvironmentArn", "documentation":"

The environment's Amazon Resource Name (ARN), which can be used in other API requests that require an ARN.

" + }, + "OperationsRole":{ + "shape":"OperationsRole", + "documentation":"

The Amazon Resource Name (ARN) of the environment's operations role. For more information, see Operations roles in the AWS Elastic Beanstalk Developer Guide.

" } }, "documentation":"

Describes the properties of an environment.

" @@ -2441,7 +2517,7 @@ }, "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "RequestId":{ "shape":"RequestId", @@ -2698,20 +2774,50 @@ }, "documentation":"

A list of available AWS Elastic Beanstalk solution stacks.

" }, + "ListPlatformBranchesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"SearchFilters", + "documentation":"

Criteria for restricting the resulting list of platform branches. The filter is evaluated as a logical conjunction (AND) of the separate SearchFilter terms.

The following list shows valid attribute values for each of the SearchFilter terms. Most operators take a single value. The in and not_in operators can take multiple values.

  • Attribute = BranchName:

    • Operator: = | != | begins_with | ends_with | contains | in | not_in

  • Attribute = LifecycleState:

    • Operator: = | != | in | not_in

    • Values: beta | supported | deprecated | retired

  • Attribute = PlatformName:

    • Operator: = | != | begins_with | ends_with | contains | in | not_in

  • Attribute = TierType:

    • Operator: = | !=

    • Values: WebServer/Standard | Worker/SQS/HTTP

Array size: limited to 10 SearchFilter objects.

Within each SearchFilter item, the Values array is limited to 10 items.

" + }, + "MaxRecords":{ + "shape":"PlatformBranchMaxRecords", + "documentation":"

The maximum number of platform branch values returned in one call.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

For a paginated request. Specify a token from a previous response page to retrieve the next response page. All other parameter values must be identical to the ones specified in the initial request.

If no NextToken is specified, the first page is retrieved.

" + } + } + }, + "ListPlatformBranchesResult":{ + "type":"structure", + "members":{ + "PlatformBranchSummaryList":{ + "shape":"PlatformBranchSummaryList", + "documentation":"

Summary information about the platform branches.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

In a paginated request, if this value isn't null, it's the token that you can pass in a subsequent request to get the next response page.

" + } + } + }, "ListPlatformVersionsRequest":{ "type":"structure", "members":{ "Filters":{ "shape":"PlatformFilters", - "documentation":"

List only the platforms where the platform member value relates to one of the supplied values.

" + "documentation":"

Criteria for restricting the resulting list of platform versions. The filter is interpreted as a logical conjunction (AND) of the separate PlatformFilter terms.

" }, "MaxRecords":{ "shape":"PlatformMaxRecords", - "documentation":"

The maximum number of platform values returned in one call.

" + "documentation":"

The maximum number of platform version values returned in one call.

" }, "NextToken":{ "shape":"Token", - "documentation":"

The starting index into the remaining list of platforms. Use the NextToken value from a previous ListPlatformVersion call.

" + "documentation":"

For a paginated request. Specify a token from a previous response page to retrieve the next response page. All other parameter values must be identical to the ones specified in the initial request.

If no NextToken is specified, the first page is retrieved.

" } } }, @@ -2720,11 +2826,11 @@ "members":{ "PlatformSummaryList":{ "shape":"PlatformSummaryList", - "documentation":"

Detailed information about the platforms.

" + "documentation":"

Summary information about the platform versions.

" }, "NextToken":{ "shape":"Token", - "documentation":"

The starting index into the remaining list of platforms. if this value is not null, you can use it in a subsequent ListPlatformVersion call.

" + "documentation":"

In a paginated request, if this value isn't null, it's the token that you can pass in a subsequent request to get the next response page.

" } } }, @@ -2734,7 +2840,7 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the resouce for which a tag list is requested.

Must be the ARN of an Elastic Beanstalk environment.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource for which a tag list is requested.

Must be the ARN of an Elastic Beanstalk resource.

" } } }, @@ -2952,6 +3058,11 @@ }, "exception":true }, + "OperationsRole":{ + "type":"string", + "max":256, + "min":1 + }, "OptionNamespace":{"type":"string"}, "OptionRestrictionMaxLength":{"type":"integer"}, "OptionRestrictionMaxValue":{"type":"integer"}, @@ -2993,102 +3104,149 @@ "member":{"shape":"OptionSpecification"} }, "PlatformArn":{"type":"string"}, + "PlatformBranchLifecycleState":{"type":"string"}, + "PlatformBranchMaxRecords":{ + "type":"integer", + "min":1 + }, + "PlatformBranchSummary":{ + "type":"structure", + "members":{ + "PlatformName":{ + "shape":"PlatformName", + "documentation":"

The name of the platform to which this platform branch belongs.

" + }, + "BranchName":{ + "shape":"BranchName", + "documentation":"

The name of the platform branch.

" + }, + "LifecycleState":{ + "shape":"PlatformBranchLifecycleState", + "documentation":"

The support life cycle state of the platform branch.

Possible values: beta | supported | deprecated | retired

" + }, + "BranchOrder":{ + "shape":"BranchOrder", + "documentation":"

An ordinal number that designates the order in which platform branches have been added to a platform. This can be helpful, for example, if your code calls the ListPlatformBranches action and then displays a list of platform branches.

A larger BranchOrder value designates a newer platform branch within the platform.

" + }, + "SupportedTierList":{ + "shape":"SupportedTierList", + "documentation":"

The environment tiers that platform versions in this branch support.

Possible values: WebServer/Standard | Worker/SQS/HTTP

" + } + }, + "documentation":"

Summary information about a platform branch.

" + }, + "PlatformBranchSummaryList":{ + "type":"list", + "member":{"shape":"PlatformBranchSummary"} + }, "PlatformCategory":{"type":"string"}, "PlatformDescription":{ "type":"structure", "members":{ "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "PlatformOwner":{ "shape":"PlatformOwner", - "documentation":"

The AWS account ID of the person who created the platform.

" + "documentation":"

The AWS account ID of the person who created the platform version.

" }, "PlatformName":{ "shape":"PlatformName", - "documentation":"

The name of the platform.

" + "documentation":"

The name of the platform version.

" }, "PlatformVersion":{ "shape":"PlatformVersion", - "documentation":"

The version of the platform.

" + "documentation":"

The version of the platform version.

" }, "SolutionStackName":{ "shape":"SolutionStackName", - "documentation":"

The name of the solution stack used by the platform.

" + "documentation":"

The name of the solution stack used by the platform version.

" }, "PlatformStatus":{ "shape":"PlatformStatus", - "documentation":"

The status of the platform.

" + "documentation":"

The status of the platform version.

" }, "DateCreated":{ "shape":"CreationDate", - "documentation":"

The date when the platform was created.

" + "documentation":"

The date when the platform version was created.

" }, "DateUpdated":{ "shape":"UpdateDate", - "documentation":"

The date when the platform was last updated.

" + "documentation":"

The date when the platform version was last updated.

" }, "PlatformCategory":{ "shape":"PlatformCategory", - "documentation":"

The category of the platform.

" + "documentation":"

The category of the platform version.

" }, "Description":{ "shape":"Description", - "documentation":"

The description of the platform.

" + "documentation":"

The description of the platform version.

" }, "Maintainer":{ "shape":"Maintainer", - "documentation":"

Information about the maintainer of the platform.

" + "documentation":"

Information about the maintainer of the platform version.

" }, "OperatingSystemName":{ "shape":"OperatingSystemName", - "documentation":"

The operating system used by the platform.

" + "documentation":"

The operating system used by the platform version.

" }, "OperatingSystemVersion":{ "shape":"OperatingSystemVersion", - "documentation":"

The version of the operating system used by the platform.

" + "documentation":"

The version of the operating system used by the platform version.

" }, "ProgrammingLanguages":{ "shape":"PlatformProgrammingLanguages", - "documentation":"

The programming languages supported by the platform.

" + "documentation":"

The programming languages supported by the platform version.

" }, "Frameworks":{ "shape":"PlatformFrameworks", - "documentation":"

The frameworks supported by the platform.

" + "documentation":"

The frameworks supported by the platform version.

" }, "CustomAmiList":{ "shape":"CustomAmiList", - "documentation":"

The custom AMIs supported by the platform.

" + "documentation":"

The custom AMIs supported by the platform version.

" }, "SupportedTierList":{ "shape":"SupportedTierList", - "documentation":"

The tiers supported by the platform.

" + "documentation":"

The tiers supported by the platform version.

" }, "SupportedAddonList":{ "shape":"SupportedAddonList", - "documentation":"

The additions supported by the platform.

" + "documentation":"

The additions supported by the platform version.

" + }, + "PlatformLifecycleState":{ + "shape":"PlatformLifecycleState", + "documentation":"

The state of the platform version in its lifecycle.

Possible values: Recommended | null

If a null value is returned, the platform version isn't the recommended one for its branch. Each platform branch has a single recommended platform version, typically the most recent one.

" + }, + "PlatformBranchName":{ + "shape":"BranchName", + "documentation":"

The platform branch to which the platform version belongs.

" + }, + "PlatformBranchLifecycleState":{ + "shape":"PlatformBranchLifecycleState", + "documentation":"

The state of the platform version's branch in its lifecycle.

Possible values: beta | supported | deprecated | retired

" } }, - "documentation":"

Detailed information about a platform.

" + "documentation":"

Detailed information about a platform version.

" }, "PlatformFilter":{ "type":"structure", "members":{ "Type":{ "shape":"PlatformFilterType", - "documentation":"

The custom platform attribute to which the filter values are applied.

Valid Values: PlatformName | PlatformVersion | PlatformStatus | PlatformOwner

" + "documentation":"

The platform version attribute to which the filter values are applied.

Valid values: PlatformName | PlatformVersion | PlatformStatus | PlatformBranchName | PlatformLifecycleState | PlatformOwner | SupportedTier | SupportedAddon | ProgrammingLanguageName | OperatingSystemName

" }, "Operator":{ "shape":"PlatformFilterOperator", - "documentation":"

The operator to apply to the Type with each of the Values.

Valid Values: = (equal to) | != (not equal to) | < (less than) | <= (less than or equal to) | > (greater than) | >= (greater than or equal to) | contains | begins_with | ends_with

" + "documentation":"

The operator to apply to the Type with each of the Values.

Valid values: = | != | < | <= | > | >= | contains | begins_with | ends_with

" }, "Values":{ "shape":"PlatformFilterValueList", - "documentation":"

The list of values applied to the custom platform attribute.

" + "documentation":"

The list of values applied to the filtering platform version attribute. Only one value is supported for all current operators.

The following list shows valid filter values for some filter attributes.

  • PlatformStatus: Creating | Failed | Ready | Deleting | Deleted

  • PlatformLifecycleState: recommended

  • SupportedTier: WebServer/Standard | Worker/SQS/HTTP

  • SupportedAddon: Log/S3 | Monitoring/Healthd | WorkerDaemon/SQSD

" } }, - "documentation":"

Specify criteria to restrict the results when listing custom platforms.

The filter is evaluated as the expression:

Type Operator Values[i]

" + "documentation":"

Describes criteria to restrict the results when listing platform versions.

The filter is evaluated as follows: Type Operator Values[1]

" }, "PlatformFilterOperator":{"type":"string"}, "PlatformFilterType":{"type":"string"}, @@ -3113,12 +3271,13 @@ "documentation":"

The version of the framework.

" } }, - "documentation":"

A framework supported by the custom platform.

" + "documentation":"

A framework supported by the platform.

" }, "PlatformFrameworks":{ "type":"list", "member":{"shape":"PlatformFramework"} }, + "PlatformLifecycleState":{"type":"string"}, "PlatformMaxRecords":{ "type":"integer", "min":1 @@ -3158,38 +3317,54 @@ "members":{ "PlatformArn":{ "shape":"PlatformArn", - "documentation":"

The ARN of the platform.

" + "documentation":"

The ARN of the platform version.

" }, "PlatformOwner":{ "shape":"PlatformOwner", - "documentation":"

The AWS account ID of the person who created the platform.

" + "documentation":"

The AWS account ID of the person who created the platform version.

" }, "PlatformStatus":{ "shape":"PlatformStatus", - "documentation":"

The status of the platform. You can create an environment from the platform once it is ready.

" + "documentation":"

The status of the platform version. You can create an environment from the platform version once it is ready.

" }, "PlatformCategory":{ "shape":"PlatformCategory", - "documentation":"

The category of platform.

" + "documentation":"

The category of platform version.

" }, "OperatingSystemName":{ "shape":"OperatingSystemName", - "documentation":"

The operating system used by the platform.

" + "documentation":"

The operating system used by the platform version.

" }, "OperatingSystemVersion":{ "shape":"OperatingSystemVersion", - "documentation":"

The version of the operating system used by the platform.

" + "documentation":"

The version of the operating system used by the platform version.

" }, "SupportedTierList":{ "shape":"SupportedTierList", - "documentation":"

The tiers in which the platform runs.

" + "documentation":"

The tiers in which the platform version runs.

" }, "SupportedAddonList":{ "shape":"SupportedAddonList", - "documentation":"

The additions associated with the platform.

" + "documentation":"

The additions associated with the platform version.

" + }, + "PlatformLifecycleState":{ + "shape":"PlatformLifecycleState", + "documentation":"

The state of the platform version in its lifecycle.

Possible values: recommended | empty

If an empty value is returned, the platform version is supported but isn't the recommended one for its branch.

" + }, + "PlatformVersion":{ + "shape":"PlatformVersion", + "documentation":"

The version string of the platform version.

" + }, + "PlatformBranchName":{ + "shape":"BranchName", + "documentation":"

The platform branch to which the platform version belongs.

" + }, + "PlatformBranchLifecycleState":{ + "shape":"PlatformBranchLifecycleState", + "documentation":"

The state of the platform version's branch in its lifecycle.

Possible values: beta | supported | deprecated | retired

" } }, - "documentation":"

Detailed information about a platform.

" + "documentation":"

Summary information about a platform version.

" }, "PlatformSummaryList":{ "type":"list", @@ -3324,7 +3499,7 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the resouce for which a tag list was requested.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource for which a tag list was requested.

" }, "ResourceTags":{ "shape":"TagList", @@ -3434,6 +3609,35 @@ "exception":true }, "SampleTimestamp":{"type":"timestamp"}, + "SearchFilter":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"SearchFilterAttribute", + "documentation":"

The result attribute to which the filter values are applied. Valid values vary by API action.

" + }, + "Operator":{ + "shape":"SearchFilterOperator", + "documentation":"

The operator to apply to the Attribute with each of the Values. Valid values vary by Attribute.

" + }, + "Values":{ + "shape":"SearchFilterValues", + "documentation":"

The list of values applied to the Attribute and Operator attributes. Number of values and valid values vary by Attribute.

" + } + }, + "documentation":"

Describes criteria to restrict a list of results.

For operators that apply a single value to the attribute, the filter is evaluated as follows: Attribute Operator Values[1]

Some operators, e.g. in, can apply multiple values. In this case, the filter is evaluated as a logical union (OR) of applications of the operator to the attribute with each one of the values: (Attribute Operator Values[1]) OR (Attribute Operator Values[2]) OR ...

The valid values for attributes of SearchFilter depend on the API action. For valid values, see the reference page for the API action you're calling that takes a SearchFilter parameter.

" + }, + "SearchFilterAttribute":{"type":"string"}, + "SearchFilterOperator":{"type":"string"}, + "SearchFilterValue":{"type":"string"}, + "SearchFilterValues":{ + "type":"list", + "member":{"shape":"SearchFilterValue"} + }, + "SearchFilters":{ + "type":"list", + "member":{"shape":"SearchFilter"} + }, "SingleInstanceHealth":{ "type":"structure", "members":{ @@ -3546,7 +3750,7 @@ "documentation":"

The name of the configuration template.

" } }, - "documentation":"

A specification for an environment configuration

" + "documentation":"

A specification for an environment configuration.

" }, "SourceLocation":{ "type":"string", @@ -3939,15 +4143,15 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the resouce to be updated.

Must be the ARN of an Elastic Beanstalk environment.

" + "documentation":"

The Amazon Resource Name (ARN) of the resource to be updated.

Must be the ARN of an Elastic Beanstalk resource.

" }, "TagsToAdd":{ "shape":"TagList", - "documentation":"

A list of tags to add or update.

If a key of an existing tag is added, the tag's value is updated.

" + "documentation":"

A list of tags to add or update. If a key of an existing tag is added, the tag's value is updated.

Specify at least one of these parameters: TagsToAdd, TagsToRemove.

" }, "TagsToRemove":{ "shape":"TagKeyList", - "documentation":"

A list of tag keys to remove.

If a tag key doesn't exist, it is silently ignored.

" + "documentation":"

A list of tag keys to remove. If a tag key doesn't exist, it is silently ignored.

Specify at least one of these parameters: TagsToAdd, TagsToRemove.

" } } }, @@ -4027,5 +4231,5 @@ }, "VirtualizationType":{"type":"string"} }, - "documentation":"AWS Elastic Beanstalk

AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud.

For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

Endpoints

For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

" + "documentation":"AWS Elastic Beanstalk

AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud.

For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services.

Endpoints

For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary.

" } diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index 0adcec95132b..2ffaa3f63c37 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json b/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..909b792bacb6 100644 --- a/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json +++ b/services/elasticinference/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,10 @@ { "pagination": { + "DescribeAccelerators": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "acceleratorSet" + } } } diff --git a/services/elasticinference/src/main/resources/codegen-resources/service-2.json b/services/elasticinference/src/main/resources/codegen-resources/service-2.json index b01d8ea368df..6a469fc8c453 100644 --- a/services/elasticinference/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticinference/src/main/resources/codegen-resources/service-2.json @@ -2,7 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2017-07-25", - "endpointPrefix":"elastic-inference", + "endpointPrefix":"api.elastic-inference", "jsonVersion":"1.1", "protocol":"rest-json", "serviceAbbreviation":"Amazon Elastic Inference", @@ -13,6 +13,49 @@ "uid":"elastic-inference-2017-07-25" }, "operations":{ + "DescribeAcceleratorOfferings":{ + "name":"DescribeAcceleratorOfferings", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerator-offerings" + }, + "input":{"shape":"DescribeAcceleratorOfferingsRequest"}, + "output":{"shape":"DescribeAcceleratorOfferingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describes the locations in which a given accelerator type or set of types is present in a given region.

" + }, + "DescribeAcceleratorTypes":{ + "name":"DescribeAcceleratorTypes", + "http":{ + "method":"GET", + "requestUri":"/describe-accelerator-types" + }, + "input":{"shape":"DescribeAcceleratorTypesRequest"}, + "output":{"shape":"DescribeAcceleratorTypesResponse"}, + "errors":[ + {"shape":"InternalServerException"} + ], + "documentation":"

Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput.

" + }, + "DescribeAccelerators":{ + "name":"DescribeAccelerators", + "http":{ + "method":"POST", + "requestUri":"/describe-accelerators" + }, + "input":{"shape":"DescribeAcceleratorsRequest"}, + "output":{"shape":"DescribeAcceleratorsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describes information over a provided set of accelerators belonging to an account.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -26,7 +69,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Returns all tags of an Elastic Inference Accelerator." + "documentation":"

Returns all tags of an Elastic Inference Accelerator.

" }, "TagResource":{ "name":"TagResource", @@ -41,7 +84,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Adds the specified tag(s) to an Elastic Inference Accelerator." + "documentation":"

Adds the specified tags to an Elastic Inference Accelerator.

" }, "UntagResource":{ "name":"UntagResource", @@ -56,35 +99,275 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Removes the specified tag(s) from an Elastic Inference Accelerator." + "documentation":"

Removes the specified tags from an Elastic Inference Accelerator.

" } }, "shapes":{ + "AcceleratorHealthStatus":{ + "type":"string", + "max":256, + "min":1 + }, + "AcceleratorId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^eia-[0-9a-f]+$" + }, + "AcceleratorIdList":{ + "type":"list", + "member":{"shape":"AcceleratorId"}, + "max":1000, + "min":0 + }, + "AcceleratorType":{ + "type":"structure", + "members":{ + "acceleratorTypeName":{ + "shape":"AcceleratorTypeName", + "documentation":"

The name of the Elastic Inference Accelerator type.

" + }, + "memoryInfo":{ + "shape":"MemoryInfo", + "documentation":"

The memory information of the Elastic Inference Accelerator type.

" + }, + "throughputInfo":{ + "shape":"ThroughputInfoList", + "documentation":"

The throughput information of the Elastic Inference Accelerator type.

" + } + }, + "documentation":"

The details of an Elastic Inference Accelerator type.

" + }, + "AcceleratorTypeList":{ + "type":"list", + "member":{"shape":"AcceleratorType"}, + "max":100, + "min":0 + }, + "AcceleratorTypeName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "AcceleratorTypeNameList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeName"}, + "max":100, + "min":0 + }, + "AcceleratorTypeOffering":{ + "type":"structure", + "members":{ + "acceleratorType":{ + "shape":"AcceleratorTypeName", + "documentation":"

The name of the Elastic Inference Accelerator type.

" + }, + "locationType":{ + "shape":"LocationType", + "documentation":"

The location type for the offering. It can assume the following values: region: defines that the offering is at the regional level. availability-zone: defines that the offering is at the availability zone level. availability-zone-id: defines that the offering is at the availability zone level, defined by the availability zone id.

" + }, + "location":{ + "shape":"Location", + "documentation":"

The location for the offering. It will return either the region, availability zone or availability zone id for the offering depending on the locationType value.

" + } + }, + "documentation":"

The offering for an Elastic Inference Accelerator type.

" + }, + "AcceleratorTypeOfferingList":{ + "type":"list", + "member":{"shape":"AcceleratorTypeOffering"}, + "max":100, + "min":0 + }, + "AvailabilityZone":{ + "type":"string", + "max":256, + "min":1 + }, "BadRequestException":{ "type":"structure", "members":{ "message":{"shape":"String"} }, - "documentation":"Raised when a malformed input has been provided to the API.", + "documentation":"

Raised when a malformed input has been provided to the API.

", "error":{"httpStatusCode":400}, "exception":true }, + "DescribeAcceleratorOfferingsRequest":{ + "type":"structure", + "required":["locationType"], + "members":{ + "locationType":{ + "shape":"LocationType", + "documentation":"

The location type that you want to describe accelerator type offerings for. It can assume the following values: region: will return the accelerator type offering at the regional level. availability-zone: will return the accelerator type offering at the availability zone level. availability-zone-id: will return the accelerator type offering at the availability zone level returning the availability zone id.

" + }, + "acceleratorTypes":{ + "shape":"AcceleratorTypeNameList", + "documentation":"

The list of accelerator types to describe.

" + } + } + }, + "DescribeAcceleratorOfferingsResponse":{ + "type":"structure", + "members":{ + "acceleratorTypeOfferings":{ + "shape":"AcceleratorTypeOfferingList", + "documentation":"

The list of accelerator type offerings for a specific location.

" + } + } + }, + "DescribeAcceleratorTypesRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeAcceleratorTypesResponse":{ + "type":"structure", + "members":{ + "acceleratorTypes":{ + "shape":"AcceleratorTypeList", + "documentation":"

The available accelerator types.

" + } + } + }, + "DescribeAcceleratorsRequest":{ + "type":"structure", + "members":{ + "acceleratorIds":{ + "shape":"AcceleratorIdList", + "documentation":"

The IDs of the accelerators to describe.

" + }, + "filters":{ + "shape":"FilterList", + "documentation":"

One or more filters. Filter names and values are case-sensitive. Valid filter names are: accelerator-types: can provide a list of accelerator type names to filter for. instance-id: can provide a list of EC2 instance ids to filter for.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + } + } + }, + "DescribeAcceleratorsResponse":{ + "type":"structure", + "members":{ + "acceleratorSet":{ + "shape":"ElasticInferenceAcceleratorSet", + "documentation":"

The details of the Elastic Inference Accelerators.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + } + } + }, + "ElasticInferenceAccelerator":{ + "type":"structure", + "members":{ + "acceleratorHealth":{ + "shape":"ElasticInferenceAcceleratorHealth", + "documentation":"

The health of the Elastic Inference Accelerator.

" + }, + "acceleratorType":{ + "shape":"AcceleratorTypeName", + "documentation":"

The type of the Elastic Inference Accelerator.

" + }, + "acceleratorId":{ + "shape":"AcceleratorId", + "documentation":"

The ID of the Elastic Inference Accelerator.

" + }, + "availabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

The availability zone where the Elastic Inference Accelerator is present.

" + }, + "attachedResource":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource that the Elastic Inference Accelerator is attached to.

" + } + }, + "documentation":"

The details of an Elastic Inference Accelerator.

" + }, + "ElasticInferenceAcceleratorHealth":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AcceleratorHealthStatus", + "documentation":"

The health status of the Elastic Inference Accelerator.

" + } + }, + "documentation":"

The health details of an Elastic Inference Accelerator.

" + }, + "ElasticInferenceAcceleratorSet":{ + "type":"list", + "member":{"shape":"ElasticInferenceAccelerator"} + }, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"FilterName", + "documentation":"

The filter name for the Elastic Inference Accelerator list. It can assume the following values: accelerator-type: the type of Elastic Inference Accelerator to filter for. instance-id: an EC2 instance id to filter for.

" + }, + "values":{ + "shape":"ValueStringList", + "documentation":"

The values for the filter of the Elastic Inference Accelerator list.

" + } + }, + "documentation":"

A filter expression for the Elastic Inference Accelerator list.

" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":100, + "min":0 + }, + "FilterName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^\\S+$" + }, + "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", "members":{ "message":{"shape":"String"} }, - "documentation":"Raised when an unexpected error occurred during request processing.", + "documentation":"

Raised when an unexpected error occurred during request processing.

", "error":{"httpStatusCode":500}, "exception":true }, + "Key":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^\\S+$" + }, + "KeyValuePair":{ + "type":"structure", + "members":{ + "key":{ + "shape":"Key", + "documentation":"

The throughput value of the Elastic Inference Accelerator type. It can assume the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS.

" + }, + "value":{ + "shape":"Value", + "documentation":"

The throughput value of the Elastic Inference Accelerator type.

" + } + }, + "documentation":"

A throughput entry for an Elastic Inference Accelerator type.

" + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], "members":{ "resourceArn":{ "shape":"ResourceARN", - "documentation":"The ARN of the Elastic Inference Accelerator to list the tags for.", + "documentation":"

The ARN of the Elastic Inference Accelerator to list the tags for.

", "location":"uri", "locationName":"resourceArn" } @@ -95,13 +378,55 @@ "members":{ "tags":{ "shape":"TagMap", - "documentation":"The tags of the Elastic Inference Accelerator." + "documentation":"

The tags of the Elastic Inference Accelerator.

" } } }, + "Location":{ + "type":"string", + "max":256, + "min":1 + }, + "LocationType":{ + "type":"string", + "enum":[ + "region", + "availability-zone", + "availability-zone-id" + ], + "max":256, + "min":1 + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "MemoryInfo":{ + "type":"structure", + "members":{ + "sizeInMiB":{ + "shape":"Integer", + "documentation":"

The size in mebibytes of the Elastic Inference Accelerator type.

" + } + }, + "documentation":"

The memory information of an Elastic Inference Accelerator type.

" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9+/]+={0,2}$" + }, "ResourceARN":{ "type":"string", "max":1011, + "min":1, + "pattern":"^arn:aws\\S*:elastic-inference:\\S+:\\d{12}:elastic-inference-accelerator/eia-[0-9a-f]+$" + }, + "ResourceArn":{ + "type":"string", + "max":1283, "min":1 }, "ResourceNotFoundException":{ @@ -109,15 +434,20 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"Raised when the requested resource cannot be found.", + "documentation":"

Raised when the requested resource cannot be found.

", "error":{"httpStatusCode":404}, "exception":true }, - "String":{"type":"string"}, + "String":{ + "type":"string", + "max":500000, + "pattern":"^.*$" + }, "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^\\S$" }, "TagKeyList":{ "type":"list", @@ -141,13 +471,13 @@ "members":{ "resourceArn":{ "shape":"ResourceARN", - "documentation":"The ARN of the Elastic Inference Accelerator to tag.", + "documentation":"

The ARN of the Elastic Inference Accelerator to tag.

", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagMap", - "documentation":"The tags to add to the Elastic Inference Accelerator." + "documentation":"

The tags to add to the Elastic Inference Accelerator.

" } } }, @@ -160,6 +490,12 @@ "type":"string", "max":256 }, + "ThroughputInfoList":{ + "type":"list", + "member":{"shape":"KeyValuePair"}, + "max":100, + "min":0 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -169,13 +505,13 @@ "members":{ "resourceArn":{ "shape":"ResourceARN", - "documentation":"The ARN of the Elastic Inference Accelerator to untag.", + "documentation":"

The ARN of the Elastic Inference Accelerator to untag.

", "location":"uri", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeyList", - "documentation":"The list of tags to remove from the Elastic Inference Accelerator.", + "documentation":"

The list of tags to remove from the Elastic Inference Accelerator.

", "location":"querystring", "locationName":"tagKeys" } @@ -185,7 +521,14 @@ "type":"structure", "members":{ } + }, + "Value":{"type":"integer"}, + "ValueStringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":100, + "min":0 } }, - "documentation":"Elastic Inference public APIs." + "documentation":"

Elastic Inference public APIs.

" } diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 06722c0d3a8f..ae703b53f1e9 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 0513c0e68a99..8e791e1684dd 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index 52c0ab9d3511..2b0582d3b3a4 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -76,7 +76,8 @@ {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, {"shape":"InvalidLoadBalancerActionException"}, - {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"}, + {"shape":"ALPNPolicyNotSupportedException"} ], "documentation":"

Creates a listener for the specified Application Load Balancer or Network Load Balancer.

To update a listener, use ModifyListener. When you are finished with a listener, you can delete it using DeleteListener. If you are finished with both the listener and the load balancer, you can delete them both using DeleteLoadBalancer.

This operation is idempotent, which means that it completes at most one time. If you attempt to create multiple listeners with the same settings, each call succeeds.

For more information, see Listeners for Your Application Load Balancers in the Application Load Balancers Guide and Listeners for Your Network Load Balancers in the Network Load Balancers Guide.

" }, @@ -449,7 +450,8 @@ {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, {"shape":"InvalidLoadBalancerActionException"}, - {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"}, + {"shape":"ALPNPolicyNotSupportedException"} ], "documentation":"

Replaces the specified properties of the specified listener. Any properties that you do not specify remain unchanged.

Changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.

To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.

" }, @@ -663,6 +665,18 @@ } }, "shapes":{ + "ALPNPolicyNotSupportedException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified ALPN policy is not supported.

", + "error":{ + "code":"ALPNPolicyNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "Action":{ "type":"structure", "required":["Type"], @@ -782,6 +796,11 @@ }, "exception":true }, + "AlpnPolicyName":{ + "type":"list", + "member":{"shape":"AlpnPolicyValue"} + }, + "AlpnPolicyValue":{"type":"string"}, "AuthenticateCognitoActionAuthenticationRequestExtraParams":{ "type":"map", "key":{"shape":"AuthenticateCognitoActionAuthenticationRequestParamName"}, @@ -1054,6 +1073,10 @@ "DefaultActions":{ "shape":"Actions", "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + }, + "AlpnPolicy":{ + "shape":"AlpnPolicyName", + "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN Policies in the Network Load Balancers Guide.

" } } }, @@ -1917,6 +1940,10 @@ "DefaultActions":{ "shape":"Actions", "documentation":"

The default actions for the listener.

" + }, + "AlpnPolicy":{ + "shape":"AlpnPolicyName", + "documentation":"

[TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) policy.

" } }, "documentation":"

Information about a listener.

" @@ -2149,6 +2176,10 @@ "DefaultActions":{ "shape":"Actions", "documentation":"

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

" + }, + "AlpnPolicy":{ + "shape":"AlpnPolicyName", + "documentation":"

[TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

  • HTTP1Only

  • HTTP2Only

  • HTTP2Optional

  • HTTP2Preferred

  • None

For more information, see ALPN Policies in the Network Load Balancers Guide.

" } } }, @@ -3006,7 +3037,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

  • stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false. The default is false.

  • stickiness.type - The type of sticky sessions. The possible values are lb_cookie for Application Load Balancers or source_ip for Network Load Balancers.

The following attributes are supported by Application Load Balancers if the target is not a Lambda function:

  • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

  • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives a linearly increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.

  • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

The following attribute is supported only if the target is a Lambda function.

  • lambda.multi_value_headers.enabled - Indicates whether the request and response headers exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

The following attribute is supported only by Network Load Balancers:

  • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

" + "documentation":"

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

  • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

  • stickiness.enabled - Indicates whether sticky sessions are enabled. The value is true or false. The default is false.

  • stickiness.type - The type of sticky sessions. The possible values are lb_cookie for Application Load Balancers or source_ip for Network Load Balancers.

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

  • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin or least_outstanding_requests. The default is round_robin.

  • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). Slow start mode is disabled by default.

  • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

  • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

The following attribute is supported only by Network Load Balancers:

  • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

" }, "Value":{ "shape":"TargetGroupAttributeValue", diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 584415ae6293..1c3427319856 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json b/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json index e360c36eb9ac..6a5c91f1c0a3 100755 --- a/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json +++ b/services/elasticsearch/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,15 @@ { "pagination": { + "DescribeInboundCrossClusterSearchConnections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "DescribeOutboundCrossClusterSearchConnections": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "DescribePackages": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/elasticsearch/src/main/resources/codegen-resources/service-2.json b/services/elasticsearch/src/main/resources/codegen-resources/service-2.json index 5bd871c9a7ee..5e960abc39ec 100644 --- a/services/elasticsearch/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticsearch/src/main/resources/codegen-resources/service-2.json @@ -10,6 +10,21 @@ "uid":"es-2015-01-01" }, "operations":{ + "AcceptInboundCrossClusterSearchConnection":{ + "name":"AcceptInboundCrossClusterSearchConnection", + "http":{ + "method":"PUT", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/accept" + }, + "input":{"shape":"AcceptInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"AcceptInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to accept an inbound cross-cluster search connection request.

" + }, "AddTags":{ "name":"AddTags", "http":{ @@ -78,6 +93,22 @@ ], "documentation":"

Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide.

" }, + "CreateOutboundCrossClusterSearchConnection":{ + "name":"CreateOutboundCrossClusterSearchConnection", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/outboundConnection" + }, + "input":{"shape":"CreateOutboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"CreateOutboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Creates a new cross-cluster search connection from a source domain to a destination domain.

" + }, "CreatePackage":{ "name":"CreatePackage", "http":{ @@ -126,6 +157,34 @@ ], "documentation":"

Deletes the service-linked role that Elasticsearch Service uses to manage and maintain VPC domains. Role deletion will fail if any existing VPC domains use the role. You must delete any such Elasticsearch domains before deleting the role. See Deleting Elasticsearch Service Role in VPC Endpoints for Amazon Elasticsearch Service Domains.

" }, + "DeleteInboundCrossClusterSearchConnection":{ + "name":"DeleteInboundCrossClusterSearchConnection", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}" + }, + "input":{"shape":"DeleteInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"DeleteInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to delete an existing inbound cross-cluster search connection.

" + }, + "DeleteOutboundCrossClusterSearchConnection":{ + "name":"DeleteOutboundCrossClusterSearchConnection", + "http":{ + "method":"DELETE", + "requestUri":"/2015-01-01/es/ccs/outboundConnection/{ConnectionId}" + }, + "input":{"shape":"DeleteOutboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"DeleteOutboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the source domain owner to delete an existing outbound cross-cluster search connection.

" + }, "DeletePackage":{ "name":"DeletePackage", "http":{ @@ -209,6 +268,34 @@ ], "documentation":"

Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. When modifying existing Domain, specify the DomainName to know what Limits are supported for modifying.

" }, + "DescribeInboundCrossClusterSearchConnections":{ + "name":"DescribeInboundCrossClusterSearchConnections", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/search" + }, + "input":{"shape":"DescribeInboundCrossClusterSearchConnectionsRequest"}, + "output":{"shape":"DescribeInboundCrossClusterSearchConnectionsResponse"}, + "errors":[ + {"shape":"InvalidPaginationTokenException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Lists all the inbound cross-cluster search connections for a destination domain.

" + }, + "DescribeOutboundCrossClusterSearchConnections":{ + "name":"DescribeOutboundCrossClusterSearchConnections", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/ccs/outboundConnection/search" + }, + "input":{"shape":"DescribeOutboundCrossClusterSearchConnectionsRequest"}, + "output":{"shape":"DescribeOutboundCrossClusterSearchConnectionsResponse"}, + "errors":[ + {"shape":"InvalidPaginationTokenException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Lists all the outbound cross-cluster search connections for a source domain.

" + }, "DescribePackages":{ "name":"DescribePackages", "http":{ @@ -440,6 +527,20 @@ ], "documentation":"

Allows you to purchase reserved Elasticsearch instances.

" }, + "RejectInboundCrossClusterSearchConnection":{ + "name":"RejectInboundCrossClusterSearchConnection", + "http":{ + "method":"PUT", + "requestUri":"/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/reject" + }, + "input":{"shape":"RejectInboundCrossClusterSearchConnectionRequest"}, + "output":{"shape":"RejectInboundCrossClusterSearchConnectionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Allows the destination domain owner to reject an inbound cross-cluster search connection request.

" + }, "RemoveTags":{ "name":"RemoveTags", "http":{ @@ -512,6 +613,29 @@ "type":"string", "documentation":"

The Amazon Resource Name (ARN) of the Elasticsearch domain. See Identifiers for IAM Entities in Using AWS Identity and Access Management for more information.

" }, + "AcceptInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to accept.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the AcceptInboundCrossClusterSearchConnection operation.

" + }, + "AcceptInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of accepted inbound connection.

" + } + }, + "documentation":"

The result of an AcceptInboundCrossClusterSearchConnection operation. Contains details of accepted inbound connection.

" + }, "AccessDeniedException":{ "type":"structure", "members":{ @@ -780,6 +904,10 @@ "error":{"httpStatusCode":409}, "exception":true }, + "ConnectionAlias":{ + "type":"string", + "max":20 + }, "CreateElasticsearchDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -852,6 +980,55 @@ }, "documentation":"

The result of a CreateElasticsearchDomain operation. Contains the status of the newly created Elasticsearch domain.

" }, + "CreateOutboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":[ + "SourceDomainInfo", + "DestinationDomainInfo", + "ConnectionAlias" + ], + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias that will be used by the customer for this connection.

" + } + }, + "documentation":"

Container for the parameters to the CreateOutboundCrossClusterSearchConnection operation.

" + }, + "CreateOutboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias provided during the create connection request.

" + }, + "ConnectionStatus":{ + "shape":"OutboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the OutboundCrossClusterSearchConnectionStatus for the newly created connection.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Unique id for the created outbound connection, which is used for subsequent operations on connection.

" + } + }, + "documentation":"

The result of a CreateOutboundCrossClusterSearchConnection request. Contains the details of the newly created cross-cluster search connection.

" + }, "CreatePackageRequest":{ "type":"structure", "required":[ @@ -890,6 +1067,8 @@ "documentation":"

Container for response returned by CreatePackage operation.

" }, "CreatedAt":{"type":"timestamp"}, + "CrossClusterSearchConnectionId":{"type":"string"}, + "CrossClusterSearchConnectionStatusMessage":{"type":"string"}, "DeleteElasticsearchDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -913,6 +1092,52 @@ }, "documentation":"

The result of a DeleteElasticsearchDomain request. Contains the status of the pending deletion, or no status if the domain and all of its resources have been deleted.

" }, + "DeleteInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to permanently delete.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the DeleteInboundCrossClusterSearchConnection operation.

" + }, + "DeleteInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of deleted inbound connection.

" + } + }, + "documentation":"

The result of a DeleteInboundCrossClusterSearchConnection operation. Contains details of deleted inbound connection.

" + }, + "DeleteOutboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the outbound connection that you want to permanently delete.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the DeleteOutboundCrossClusterSearchConnection operation.

" + }, + "DeleteOutboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"OutboundCrossClusterSearchConnection", + "documentation":"

Specifies the OutboundCrossClusterSearchConnection of deleted outbound connection.

" + } + }, + "documentation":"

The result of a DeleteOutboundCrossClusterSearchConnection operation. Contains details of deleted outbound connection.

" + }, "DeletePackageRequest":{ "type":"structure", "required":["PackageID"], @@ -1052,6 +1277,70 @@ }, "documentation":"

Container for the parameters received from DescribeElasticsearchInstanceTypeLimits operation.

" }, + "DescribeInboundCrossClusterSearchConnectionsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

A list of filters used to match properties for inbound cross-cluster search connection. Available Filter names for this operation are:

  • cross-cluster-search-connection-id
  • source-domain-info.domain-name
  • source-domain-info.owner-id
  • source-domain-info.region
  • destination-domain-info.domain-name

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Set this value to limit the number of results returned. If not specified, defaults to 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is sent in case the earlier API call results contain the NextToken. It is used for pagination.

" + } + }, + "documentation":"

Container for the parameters to the DescribeInboundCrossClusterSearchConnections operation.

" + }, + "DescribeInboundCrossClusterSearchConnectionsResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnections":{ + "shape":"InboundCrossClusterSearchConnections", + "documentation":"

Consists of list of InboundCrossClusterSearchConnection matching the specified filter criteria.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If more results are available and NextToken is present, make the next request to the same API with the received NextToken to paginate the remaining results.

" + } + }, + "documentation":"

The result of a DescribeInboundCrossClusterSearchConnections request. Contains the list of connections matching the filter criteria.

" + }, + "DescribeOutboundCrossClusterSearchConnectionsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

A list of filters used to match properties for outbound cross-cluster search connection. Available Filter names for this operation are:

  • cross-cluster-search-connection-id
  • destination-domain-info.domain-name
  • destination-domain-info.owner-id
  • destination-domain-info.region
  • source-domain-info.domain-name

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Set this value to limit the number of results returned. If not specified, defaults to 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

NextToken is sent in case the earlier API call results contain the NextToken. It is used for pagination.

" + } + }, + "documentation":"

Container for the parameters to the DescribeOutboundCrossClusterSearchConnections operation.

" + }, + "DescribeOutboundCrossClusterSearchConnectionsResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnections":{ + "shape":"OutboundCrossClusterSearchConnections", + "documentation":"

Consists of list of OutboundCrossClusterSearchConnection matching the specified filter criteria.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If more results are available and NextToken is present, make the next request to the same API with the received NextToken to paginate the remaining results.

" + } + }, + "documentation":"

The result of a DescribeOutboundCrossClusterSearchConnections request. Contains the list of connections matching the filter criteria.

" + }, "DescribePackagesFilter":{ "type":"structure", "members":{ @@ -1284,6 +1573,15 @@ "member":{"shape":"DomainInfo"}, "documentation":"

Contains the list of Elasticsearch domain information.

" }, + "DomainInformation":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "OwnerId":{"shape":"OwnerId"}, + "DomainName":{"shape":"DomainName"}, + "Region":{"shape":"Region"} + } + }, "DomainName":{ "type":"string", "documentation":"

The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).

", @@ -1762,6 +2060,24 @@ }, "ErrorMessage":{"type":"string"}, "ErrorType":{"type":"string"}, + "Filter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

Specifies the name of the filter.

" + }, + "Values":{ + "shape":"ValueStringList", + "documentation":"

Contains one or more values for the filter.

" + } + }, + "documentation":"

A filter used to limit results when describing inbound or outbound cross-cluster search connections. Multiple values can be specified per filter. A cross-cluster search connection must match at least one of the specified values for it to be returned from an operation.

" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"} + }, "GUID":{ "type":"string", "pattern":"\\p{XDigit}{8}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{12}" @@ -1859,6 +2175,57 @@ "min":1, "pattern":"[\\w-]+:[0-9a-f-]+" }, + "InboundCrossClusterSearchConnection":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Specifies the connection id for the inbound cross-cluster search connection.

" + }, + "ConnectionStatus":{ + "shape":"InboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the InboundCrossClusterSearchConnectionStatus for the inbound connection.

" + } + }, + "documentation":"

Specifies details of an inbound connection.

" + }, + "InboundCrossClusterSearchConnectionStatus":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"InboundCrossClusterSearchConnectionStatusCode", + "documentation":"

The state code for inbound connection. This can be one of the following:

  • PENDING_ACCEPTANCE: Inbound connection is not yet accepted by destination domain owner.
  • APPROVED: Inbound connection is pending acceptance by destination domain owner.
  • REJECTING: Inbound connection rejection is in process.
  • REJECTED: Inbound connection is rejected.
  • DELETING: Inbound connection deletion is in progress.
  • DELETED: Inbound connection is deleted and cannot be used further.
" + }, + "Message":{ + "shape":"CrossClusterSearchConnectionStatusMessage", + "documentation":"

Specifies verbose information for the inbound connection status.

" + } + }, + "documentation":"

Specifies the connection status of an inbound cross-cluster search connection.

" + }, + "InboundCrossClusterSearchConnectionStatusCode":{ + "type":"string", + "enum":[ + "PENDING_ACCEPTANCE", + "APPROVED", + "REJECTING", + "REJECTED", + "DELETING", + "DELETED" + ] + }, + "InboundCrossClusterSearchConnections":{ + "type":"list", + "member":{"shape":"InboundCrossClusterSearchConnection"} + }, "InstanceCount":{ "type":"integer", "documentation":"

Specifies the number of EC2 instances in the Elasticsearch domain.

", @@ -1890,6 +2257,14 @@ "error":{"httpStatusCode":500}, "exception":true }, + "InvalidPaginationTokenException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The request processing has failed because of invalid pagination token provided by customer. Returns an HTTP status code of 400.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidTypeException":{ "type":"structure", "members":{ @@ -2224,6 +2599,10 @@ }, "documentation":"

Status of the node-to-node encryption options for the specified Elasticsearch domain.

" }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, "OptionState":{ "type":"string", "documentation":"

The state of a requested change. One of the following:

  • Processing: The request change is still in-process.
  • Active: The request change is processed and deployed to the Elasticsearch domain.
", @@ -2264,6 +2643,68 @@ }, "documentation":"

Provides the current status of the entity.

" }, + "OutboundCrossClusterSearchConnection":{ + "type":"structure", + "members":{ + "SourceDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the source Elasticsearch domain.

" + }, + "DestinationDomainInfo":{ + "shape":"DomainInformation", + "documentation":"

Specifies the DomainInformation for the destination Elasticsearch domain.

" + }, + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

Specifies the connection id for the outbound cross-cluster search connection.

" + }, + "ConnectionAlias":{ + "shape":"ConnectionAlias", + "documentation":"

Specifies the connection alias for the outbound cross-cluster search connection.

" + }, + "ConnectionStatus":{ + "shape":"OutboundCrossClusterSearchConnectionStatus", + "documentation":"

Specifies the OutboundCrossClusterSearchConnectionStatus for the outbound connection.

" + } + }, + "documentation":"

Specifies details of an outbound connection.

" + }, + "OutboundCrossClusterSearchConnectionStatus":{ + "type":"structure", + "members":{ + "StatusCode":{ + "shape":"OutboundCrossClusterSearchConnectionStatusCode", + "documentation":"

The state code for outbound connection. This can be one of the following:

  • VALIDATING: The outbound connection request is being validated.
  • VALIDATION_FAILED: Validation failed for the connection request.
  • PENDING_ACCEPTANCE: Outbound connection request is validated and is not yet accepted by destination domain owner.
  • PROVISIONING: Outbound connection request is in process.
  • ACTIVE: Outbound connection is active and ready to use.
  • REJECTED: Outbound connection request is rejected by destination domain owner.
  • DELETING: Outbound connection deletion is in progress.
  • DELETED: Outbound connection is deleted and cannot be used further.
" + }, + "Message":{ + "shape":"CrossClusterSearchConnectionStatusMessage", + "documentation":"

Specifies verbose information for the outbound connection status.

" + } + }, + "documentation":"

Specifies the connection status of an outbound cross-cluster search connection.

" + }, + "OutboundCrossClusterSearchConnectionStatusCode":{ + "type":"string", + "enum":[ + "PENDING_ACCEPTANCE", + "VALIDATING", + "VALIDATION_FAILED", + "PROVISIONING", + "ACTIVE", + "REJECTED", + "DELETING", + "DELETED" + ] + }, + "OutboundCrossClusterSearchConnections":{ + "type":"list", + "member":{"shape":"OutboundCrossClusterSearchConnection"} + }, + "OwnerId":{ + "type":"string", + "max":12, + "min":12 + }, "PackageDescription":{ "type":"string", "max":1024 @@ -2408,6 +2849,30 @@ "member":{"shape":"RecurringCharge"} }, "ReferencePath":{"type":"string"}, + "Region":{"type":"string"}, + "RejectInboundCrossClusterSearchConnectionRequest":{ + "type":"structure", + "required":["CrossClusterSearchConnectionId"], + "members":{ + "CrossClusterSearchConnectionId":{ + "shape":"CrossClusterSearchConnectionId", + "documentation":"

The id of the inbound connection that you want to reject.

", + "location":"uri", + "locationName":"ConnectionId" + } + }, + "documentation":"

Container for the parameters to the RejectInboundCrossClusterSearchConnection operation.

" + }, + "RejectInboundCrossClusterSearchConnectionResponse":{ + "type":"structure", + "members":{ + "CrossClusterSearchConnection":{ + "shape":"InboundCrossClusterSearchConnection", + "documentation":"

Specifies the InboundCrossClusterSearchConnection of rejected inbound connection.

" + } + }, + "documentation":"

The result of a RejectInboundCrossClusterSearchConnection operation. Contains details of rejected inbound connection.

" + }, "RemoveTagsRequest":{ "type":"structure", "required":[ @@ -2600,6 +3065,10 @@ "AutomatedUpdateDate":{ "shape":"DeploymentCloseDateTimeStamp", "documentation":"

Timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software.

" + }, + "OptionalDeployment":{ + "shape":"Boolean", + "documentation":"

True if a service software is never automatically updated. False if a service software is automatically updated after AutomatedUpdateDate.

" } }, "documentation":"

The current options of an Elasticsearch domain service software options.

" @@ -2993,6 +3462,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ValueStringList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "min":1 + }, "VolumeType":{ "type":"string", "documentation":"

The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based Storage for more information.

", diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index c2e198e01063..3d6c3488c17f 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 84631ffbce5f..4abfbe1547ec 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index 8a42b4f5b625..1b4bdd199e81 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -179,6 +179,16 @@ ], "documentation":"

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" }, + "GetManagedScalingPolicy":{ + "name":"GetManagedScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetManagedScalingPolicyInput"}, + "output":{"shape":"GetManagedScalingPolicyOutput"}, + "documentation":"

Fetches the attached managed scaling policy for an Amazon EMR cluster.

" + }, "ListBootstrapActions":{ "name":"ListBootstrapActions", "http":{ @@ -340,6 +350,16 @@ ], "documentation":"

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

" }, + "PutManagedScalingPolicy":{ + "name":"PutManagedScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutManagedScalingPolicyInput"}, + "output":{"shape":"PutManagedScalingPolicyOutput"}, + "documentation":"

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + }, "RemoveAutoScalingPolicy":{ "name":"RemoveAutoScalingPolicy", "http":{ @@ -350,6 +370,16 @@ "output":{"shape":"RemoveAutoScalingPolicyOutput"}, "documentation":"

Removes an automatic scaling policy from a specified instance group within an EMR cluster.

" }, + "RemoveManagedScalingPolicy":{ + "name":"RemoveManagedScalingPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveManagedScalingPolicyInput"}, + "output":{"shape":"RemoveManagedScalingPolicyOutput"}, + "documentation":"

Removes a managed scaling policy from a specified EMR cluster.

" + }, "RemoveTags":{ "name":"RemoveTags", "http":{ @@ -875,6 +905,10 @@ "shape":"String", "documentation":"

The path to the Amazon S3 location where logs for this cluster are stored.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"String", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "RequestedAmiVersion":{ "shape":"String", "documentation":"

The AMI version requested for this cluster.

" @@ -955,13 +989,13 @@ "shape":"ArnType", "documentation":"

The Amazon Resource Name of the cluster.

" }, - "StepConcurrencyLevel":{ - "shape":"Integer", - "documentation":"

Specifies the number of steps that can be executed concurrently.

" - }, "OutpostArn":{ "shape":"OptionalArnType", "documentation":"

The Amazon Resource Name (ARN) of the Outpost where the cluster is launched.

" + }, + "StepConcurrencyLevel":{ + "shape":"Integer", + "documentation":"

Specifies the number of steps that can be executed concurrently.

" } }, "documentation":"

The detailed description of the cluster.

" @@ -1111,6 +1145,45 @@ "LESS_THAN_OR_EQUAL" ] }, + "ComputeLimits":{ + "type":"structure", + "required":[ + "UnitType", + "MinimumCapacityUnits", + "MaximumCapacityUnits" + ], + "members":{ + "UnitType":{ + "shape":"ComputeLimitsUnitType", + "documentation":"

The unit type used for specifying a managed scaling policy.

" + }, + "MinimumCapacityUnits":{ + "shape":"Integer", + "documentation":"

The lower boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + }, + "MaximumCapacityUnits":{ + "shape":"Integer", + "documentation":"

The upper boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + }, + "MaximumOnDemandCapacityUnits":{ + "shape":"Integer", + "documentation":"

The upper boundary of On-Demand EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot instances.

" + }, + "MaximumCoreCapacityUnits":{ + "shape":"Integer", + "documentation":"

The upper boundary of EC2 units for core node type in a cluster. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.

" + } + }, + "documentation":"

The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster can not be above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + }, + "ComputeLimitsUnitType":{ + "type":"string", + "enum":[ + "InstanceFleetUnits", + "Instances", + "VCPU" + ] + }, "Configuration":{ "type":"structure", "members":{ @@ -1456,7 +1529,7 @@ "members":{ "BlockPublicAccessConfiguration":{ "shape":"BlockPublicAccessConfiguration", - "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

" + "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.

" }, "BlockPublicAccessConfigurationMetadata":{ "shape":"BlockPublicAccessConfigurationMetadata", @@ -1464,6 +1537,25 @@ } } }, + "GetManagedScalingPolicyInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

Specifies the ID of the cluster for which the managed scaling policy will be fetched.

" + } + } + }, + "GetManagedScalingPolicyOutput":{ + "type":"structure", + "members":{ + "ManagedScalingPolicy":{ + "shape":"ManagedScalingPolicy", + "documentation":"

Specifies the managed scaling policy that is attached to an Amazon EMR cluster.

" + } + } + }, "HadoopJarStepConfig":{ "type":"structure", "required":["Jar"], @@ -1677,14 +1769,17 @@ }, "InstanceFleetProvisioningSpecifications":{ "type":"structure", - "required":["SpotSpecification"], "members":{ "SpotSpecification":{ "shape":"SpotProvisioningSpecification", - "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration and provisioning timeout behavior.

" + "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

" + }, + "OnDemandSpecification":{ + "shape":"OnDemandProvisioningSpecification", + "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

" } }, - "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration and provisioning timeout behavior.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "documentation":"

The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation strategies are available in Amazon EMR version 5.12.1 and later.

" }, "InstanceFleetState":{ "type":"string", @@ -2332,6 +2427,10 @@ "shape":"XmlString", "documentation":"

The location in Amazon S3 where log files for the job are stored.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"XmlString", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "AmiVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.

" @@ -2850,6 +2949,16 @@ "documentation":"

This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.

" }, "Long":{"type":"long"}, + "ManagedScalingPolicy":{ + "type":"structure", + "members":{ + "ComputeLimits":{ + "shape":"ComputeLimits", + "documentation":"

The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + } + }, + "documentation":"

Managed scaling policy for an Amazon EMR cluster. The policy specifies the limits for resources that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

" + }, "Marker":{"type":"string"}, "MarketType":{ "type":"string", @@ -2938,6 +3047,21 @@ "type":"double", "min":0.0 }, + "OnDemandProvisioningAllocationStrategy":{ + "type":"string", + "enum":["lowest-price"] + }, + "OnDemandProvisioningSpecification":{ + "type":"structure", + "required":["AllocationStrategy"], + "members":{ + "AllocationStrategy":{ + "shape":"OnDemandProvisioningAllocationStrategy", + "documentation":"

Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest-priced instances first.

" + } + }, + "documentation":"

The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

" + }, "OptionalArnType":{ "type":"string", "max":2048, @@ -3030,7 +3154,7 @@ "members":{ "BlockPublicAccessConfiguration":{ "shape":"BlockPublicAccessConfiguration", - "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.

" + "documentation":"

A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules to remove the exception.

For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.

" } } }, @@ -3039,6 +3163,28 @@ "members":{ } }, + "PutManagedScalingPolicyInput":{ + "type":"structure", + "required":[ + "ClusterId", + "ManagedScalingPolicy" + ], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

Specifies the ID of an EMR cluster where the managed scaling policy is attached.

" + }, + "ManagedScalingPolicy":{ + "shape":"ManagedScalingPolicy", + "documentation":"

Specifies the constraints for the managed scaling policy.

" + } + } + }, + "PutManagedScalingPolicyOutput":{ + "type":"structure", + "members":{ + } + }, "RemoveAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -3061,6 +3207,21 @@ "members":{ } }, + "RemoveManagedScalingPolicyInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{ + "shape":"ClusterId", + "documentation":"

Specifies the ID of the cluster from which the managed scaling policy will be removed.

" + } + } + }, + "RemoveManagedScalingPolicyOutput":{ + "type":"structure", + "members":{ + } + }, "RemoveTagsInput":{ "type":"structure", "required":[ @@ -3108,6 +3269,10 @@ "shape":"XmlString", "documentation":"

The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

" }, + "LogEncryptionKmsKeyId":{ + "shape":"XmlString", + "documentation":"

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs will remain encrypted by AES-256. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

" + }, "AdditionalInfo":{ "shape":"XmlString", "documentation":"

A JSON string for selecting additional features.

" @@ -3195,6 +3360,10 @@ "StepConcurrencyLevel":{ "shape":"Integer", "documentation":"

Specifies the number of steps that can be executed concurrently. The default value is 1. The maximum value is 256.

" + }, + "ManagedScalingPolicy":{ + "shape":"ManagedScalingPolicy", + "documentation":"

The specified managed scaling policy for an Amazon EMR cluster.

" } }, "documentation":"

Input to the RunJobFlow operation.

" @@ -3401,6 +3570,10 @@ }, "documentation":"

An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.

" }, + "SpotProvisioningAllocationStrategy":{ + "type":"string", + "enum":["capacity-optimized"] + }, "SpotProvisioningSpecification":{ "type":"structure", "required":[ @@ -3419,9 +3592,13 @@ "BlockDurationMinutes":{ "shape":"WholeNumber", "documentation":"

The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.

" + }, + "AllocationStrategy":{ + "shape":"SpotProvisioningAllocationStrategy", + "documentation":"

Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.

" } }, - "documentation":"

The launch specification for Spot instances in the instance fleet, which determines the defined duration and provisioning timeout behavior.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

" + "documentation":"

The launch specification for Spot instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot instance allocation strategy is available in Amazon EMR version 5.12.1 and later.

" }, "SpotProvisioningTimeoutAction":{ "type":"string", diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index a8084a3430a8..295769a84054 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/service-2.json b/services/eventbridge/src/main/resources/codegen-resources/service-2.json index 0b9fa71561a4..340fee80a1c8 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/service-2.json +++ b/services/eventbridge/src/main/resources/codegen-resources/service-2.json @@ -23,7 +23,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidStateException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Activates a partner event source that has been deactivated. Once activated, your matching event bus will start receiving events from the event source.

" }, @@ -41,7 +42,8 @@ {"shape":"InvalidStateException"}, {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your custom applications and services, or it can be a partner event bus which can be matched to a partner event source.

" }, @@ -57,7 +59,8 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

Called by a SaaS partner to create a partner event source. This operation is not used by AWS customers.

Each partner event source can be used by one AWS account to create a matching partner event bus in that AWS account. A SaaS partner must create one partner event source for each AWS account that wants to receive those event types.

A partner event source creates events based on resources within the SaaS partner's service or application.

An AWS account that creates a partner event bus that matches the partner event source can use that event bus to receive events from the partner, and then process them using AWS Events rules and targets.

Partner event source names follow this format:

partner_name/event_namespace/event_name

partner_name is determined during partner registration and identifies the partner to AWS customers. event_namespace is determined by the partner and is a way for the partner to categorize their events. event_name is determined by the partner, and should uniquely identify an event-generating resource within the partner system. The combination of event_namespace and event_name should help AWS customers decide whether to create an event bus to receive these events.

" }, @@ -72,7 +75,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, {"shape":"InvalidStateException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

You can use this operation to temporarily stop receiving events from the specified partner event source. The matching event bus is not deleted.

When you deactivate a partner event source, the source goes into PENDING state. If it remains in PENDING state for more than two weeks, it is deleted.

To activate a deactivated partner event source, use ActivateEventSource.

" }, @@ -98,7 +102,8 @@ "input":{"shape":"DeletePartnerEventSourceRequest"}, "errors":[ {"shape":"InternalException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This operation is used by SaaS partners to delete a partner event source. This operation is not used by AWS customers.

When you delete an event source, the status of the corresponding partner event bus in the AWS customer account becomes DELETED.

" }, @@ -141,7 +146,8 @@ "output":{"shape":"DescribeEventSourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This operation lists details about a partner event source that is shared with your account.

" }, @@ -155,7 +161,8 @@ "output":{"shape":"DescribePartnerEventSourceResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to list details about a partner event source that they have created. AWS customers do not use this operation. Instead, AWS customers can use DescribeEventSource to see details about a partner event source that is shared with them.

" }, @@ -225,7 +232,8 @@ "input":{"shape":"ListEventSourcesRequest"}, "output":{"shape":"ListEventSourcesResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

You can use this to see all the partner event sources that have been shared with your AWS account. For more information about partner event sources, see CreateEventBus.

" }, @@ -239,7 +247,8 @@ "output":{"shape":"ListPartnerEventSourceAccountsResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to display the AWS account ID that a particular partner event source name is associated with. This operation is not used by AWS customers.

" }, @@ -252,7 +261,8 @@ "input":{"shape":"ListPartnerEventSourcesRequest"}, "output":{"shape":"ListPartnerEventSourcesResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

A SaaS partner can use this operation to list all the partner event source names that they have created. This operation is not used by AWS customers.

" }, @@ -334,7 +344,8 @@ "input":{"shape":"PutPartnerEventsRequest"}, "output":{"shape":"PutPartnerEventsResponse"}, "errors":[ - {"shape":"InternalException"} + {"shape":"InternalException"}, + {"shape":"OperationDisabledException"} ], "documentation":"

This is used by SaaS partners to write events to a customer's partner event bus. AWS customers do not use this operation.

" }, @@ -351,7 +362,7 @@ {"shape":"InternalException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. CloudWatch Events rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" + "documentation":"

Running PutPermission permits the specified AWS account or AWS organization to put events to the specified event bus. Amazon EventBridge (CloudWatch Events) rules in your account are triggered by these events arriving to an event bus in your account.

For another account to send events to your account, that external account must have an EventBridge rule with your account's event bus as a target.

To enable multiple AWS accounts to put events to your event bus, run PutPermission once for each of these accounts. Or, if all the accounts are members of the same AWS organization, you can run PutPermission once specifying Principal as \"*\" and specifying the AWS organization ID in Condition, to grant permissions to all accounts in that organization.

If you grant permissions using an organization, then accounts in that organization must specify a RoleArn with proper permissions when they use PutTarget to add your account's event bus as a target. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

The permission policy on the default event bus cannot exceed 10 KB in size.

" }, "PutRule":{ "name":"PutRule", @@ -386,7 +397,7 @@ {"shape":"ManagedRuleException"}, {"shape":"InternalException"} ], - "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon CloudWatch Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, and AWS Step Functions state machines, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon CloudWatch Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" + "documentation":"

Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule.

Targets are the resources that are invoked when a rule is triggered.

You can configure the following as targets for Events:

  • EC2 instances

  • SSM Run Command

  • SSM Automation

  • AWS Lambda functions

  • Data streams in Amazon Kinesis Data Streams

  • Data delivery streams in Amazon Kinesis Data Firehose

  • Amazon ECS tasks

  • AWS Step Functions state machines

  • AWS Batch jobs

  • AWS CodeBuild projects

  • Pipelines in AWS CodePipeline

  • Amazon Inspector assessment templates

  • Amazon SNS topics

  • Amazon SQS queues, including FIFO queues

  • The default event bus of another AWS account

  • Amazon API Gateway REST APIs

Creating rules with built-in targets is supported only in the AWS Management Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances API call.

For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field.

To be able to make API calls against the resources that you own, Amazon EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis data streams, AWS Step Functions state machines and API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide.

If another AWS account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge (CloudWatch Events) Pricing.

Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different AWS account.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

For more information about enabling cross-account events, see PutPermission.

Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event:

  • If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target).

  • If Input is specified in the form of valid JSON, then the matched event is overridden with this constant.

  • If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed).

  • If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target.

When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation.

When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect.

This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -462,7 +473,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ManagedRuleException"} ], - "documentation":"

Removes one or more tags from the specified EventBridge resource. In CloudWatch Events, rules and event buses can be tagged.

" + "documentation":"

Removes one or more tags from the specified EventBridge resource. In Amazon EventBridge (CloudWatch Events), rules and event buses can be tagged.

" } }, "shapes":{ @@ -998,6 +1009,39 @@ ] }, "EventTime":{"type":"timestamp"}, + "HeaderKey":{ + "type":"string", + "max":512, + "pattern":"^[!#$%&'*+-.^_`|~0-9a-zA-Z]+$" + }, + "HeaderParametersMap":{ + "type":"map", + "key":{"shape":"HeaderKey"}, + "value":{"shape":"HeaderValue"} + }, + "HeaderValue":{ + "type":"string", + "max":512, + "pattern":"^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$" + }, + "HttpParameters":{ + "type":"structure", + "members":{ + "PathParameterValues":{ + "shape":"PathParameterList", + "documentation":"

The path parameter values to be used to populate API Gateway REST API path wildcards (\"*\").

" + }, + "HeaderParameters":{ + "shape":"HeaderParametersMap", + "documentation":"

The headers that need to be sent as part of the request that invokes the API Gateway REST API.

" + }, + "QueryStringParameters":{ + "shape":"QueryStringParametersMap", + "documentation":"

The query string keys/values that need to be sent as part of the request that invokes the API Gateway REST API.

" + } + }, + "documentation":"

These are custom parameters to be used when the target is an API Gateway REST API.

" + }, "InputTransformer":{ "type":"structure", "required":["InputTemplate"], @@ -1355,6 +1399,13 @@ "min":1, "pattern":"[\\.\\-_A-Za-z0-9]+" }, + "OperationDisabledException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The operation you are attempting is not available in this region.

", + "exception":true + }, "PartnerEventSource":{ "type":"structure", "members":{ @@ -1405,6 +1456,14 @@ "min":1, "pattern":"aws\\.partner/[\\.\\-_A-Za-z0-9]+/[/\\.\\-_A-Za-z0-9]*" }, + "PathParameter":{ + "type":"string", + "pattern":"^(?!\\s*$).+" + }, + "PathParameterList":{ + "type":"list", + "member":{"shape":"PathParameter"} + }, "PolicyLengthExceededException":{ "type":"structure", "members":{ @@ -1709,6 +1768,21 @@ "type":"list", "member":{"shape":"PutTargetsResultEntry"} }, + "QueryStringKey":{ + "type":"string", + "max":512, + "pattern":"[^\\x00-\\x1F\\x7F]+" + }, + "QueryStringParametersMap":{ + "type":"map", + "key":{"shape":"QueryStringKey"}, + "value":{"shape":"QueryStringValue"} + }, + "QueryStringValue":{ + "type":"string", + "max":512, + "pattern":"[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+" + }, "RemovePermissionRequest":{ "type":"structure", "required":["StatementId"], @@ -2059,6 +2133,10 @@ "SqsParameters":{ "shape":"SqsParameters", "documentation":"

Contains the message group ID to use when the target is a FIFO queue.

If you specify an SQS FIFO queue as a target, the queue must have content-based deduplication enabled.

" + }, + "HttpParameters":{ + "shape":"HttpParameters", + "documentation":"

Contains the HTTP parameters to use when the target is an API Gateway REST endpoint.

If you specify an API Gateway REST API as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of the request invoked on your target.

" } }, "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between AWS Accounts in the Amazon EventBridge User Guide.

" diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 6c5d2c021526..13d77d1647c6 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/firehose/src/main/resources/codegen-resources/service-2.json b/services/firehose/src/main/resources/codegen-resources/service-2.json index b7d343d18f7a..daa2f5afcb41 100644 --- a/services/firehose/src/main/resources/codegen-resources/service-2.json +++ b/services/firehose/src/main/resources/codegen-resources/service-2.json @@ -128,7 +128,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again.

You can only enable SSE for a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" + "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" }, "StopDeliveryStreamEncryption":{ "name":"StopDeliveryStreamEncryption", @@ -247,6 +247,7 @@ }, "ClusterJDBCURL":{ "type":"string", + "max":512, "min":1, "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Describes a COPY command for Amazon Redshift.

" }, - "CopyOptions":{"type":"string"}, + "CopyOptions":{ + "type":"string", + "max":204800, + "min":0, + "pattern":".*" + }, "CreateDeliveryStreamInput":{ "type":"structure", "required":["DeliveryStreamName"], @@ -361,15 +368,15 @@ "members":{ "SchemaConfiguration":{ "shape":"SchemaConfiguration", - "documentation":"

Specifies the AWS Glue Data Catalog table that contains the column information.

" + "documentation":"

Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" }, "InputFormatConfiguration":{ "shape":"InputFormatConfiguration", - "documentation":"

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON.

" + "documentation":"

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" }, "OutputFormatConfiguration":{ "shape":"OutputFormatConfiguration", - "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format.

" + "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" }, "Enabled":{ "shape":"BooleanObject", @@ -378,10 +385,17 @@ }, "documentation":"

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the AWS Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

" }, - "DataTableColumns":{"type":"string"}, + "DataTableColumns":{ + "type":"string", + "max":204800, + "min":0, + "pattern":".*" + }, "DataTableName":{ "type":"string", - "min":1 + "max":512, + "min":1, + "pattern":".*" }, "DeleteDeliveryStreamInput":{ "type":"structure", @@ -504,10 +518,10 @@ }, "KeyType":{ "shape":"KeyType", - "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is already encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

" + "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the AWS Key Management Service developer guide.

" } }, - "documentation":"

Used to specify the type and Amazon Resource Name (ARN) of the CMK needed for Server-Side Encryption (SSE).

" + "documentation":"

Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

" }, "DeliveryStreamEncryptionStatus":{ "type":"string", @@ -530,6 +544,13 @@ "INVALID_KMS_KEY", "KMS_KEY_NOT_FOUND", "KMS_OPT_IN_REQUIRED", + "CREATE_ENI_FAILED", + "DELETE_ENI_FAILED", + "SUBNET_NOT_FOUND", + "SECURITY_GROUP_NOT_FOUND", + "ENI_ACCESS_DENIED", + "SUBNET_ACCESS_DENIED", + "SECURITY_GROUP_ACCESS_DENIED", "UNKNOWN_ERROR" ] }, @@ -651,7 +672,8 @@ "DestinationId":{ "type":"string", "max":100, - "min":1 + "min":1, + "pattern":"[a-zA-Z0-9-]+" }, "ElasticsearchBufferingHints":{ "type":"structure", @@ -738,6 +760,10 @@ "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", "documentation":"

The Amazon CloudWatch logging options for your delivery stream.

" + }, + "VpcConfiguration":{ + "shape":"VpcConfiguration", + "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, "documentation":"

Describes the configuration of a destination in Amazon ES.

" @@ -792,6 +818,10 @@ "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", "documentation":"

The Amazon CloudWatch logging options.

" + }, + "VpcConfigurationDescription":{ + "shape":"VpcConfigurationDescription", + "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, "documentation":"

The destination description in Amazon ES.

" @@ -855,7 +885,8 @@ "ElasticsearchIndexName":{ "type":"string", "max":80, - "min":1 + "min":1, + "pattern":".*" }, "ElasticsearchIndexRotationPeriod":{ "type":"string", @@ -892,7 +923,8 @@ "ElasticsearchTypeName":{ "type":"string", "max":100, - "min":0 + "min":0, + "pattern":".*" }, "EncryptionConfiguration":{ "type":"structure", @@ -910,7 +942,12 @@ }, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, - "ErrorOutputPrefix":{"type":"string"}, + "ErrorOutputPrefix":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, "ExtendedS3DestinationConfiguration":{ "type":"structure", "required":[ @@ -1107,7 +1144,12 @@ "max":600, "min":180 }, - "HECEndpoint":{"type":"string"}, + "HECEndpoint":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*" + }, "HECEndpointType":{ "type":"string", "enum":[ @@ -1115,7 +1157,12 @@ "Event" ] }, - "HECToken":{"type":"string"}, + "HECToken":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*" + }, "HiveJsonSerDe":{ "type":"structure", "members":{ @@ -1134,7 +1181,7 @@ "documentation":"

Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.

" } }, - "documentation":"

Specifies the deserializer you want to use to convert the format of the input data.

" + "documentation":"

Specifies the deserializer you want to use to convert the format of the input data. This parameter is required if Enabled is set to true.

" }, "IntervalInSeconds":{ "type":"integer", @@ -1325,18 +1372,32 @@ "max":50, "min":0 }, - "LogGroupName":{"type":"string"}, - "LogStreamName":{"type":"string"}, + "LogGroupName":{ + "type":"string", + "max":512, + "min":0, + "pattern":"[\\.\\-_/#A-Za-z0-9]*" + }, + "LogStreamName":{ + "type":"string", + "max":512, + "min":0, + "pattern":"[^:*]*" + }, "NoEncryptionConfig":{ "type":"string", "enum":["NoEncryption"] }, "NonEmptyString":{ "type":"string", + "max":1024, + "min":1, "pattern":"^(?!\\s*$).+" }, "NonEmptyStringWithoutWhitespace":{ "type":"string", + "max":1024, + "min":1, "pattern":"^\\S+$" }, "NonNegativeIntegerObject":{ @@ -1438,7 +1499,7 @@ "documentation":"

Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.

" } }, - "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3.

" + "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" }, "ParquetCompression":{ "type":"string", @@ -1491,10 +1552,17 @@ }, "Password":{ "type":"string", + "max":512, "min":6, + "pattern":".*", "sensitive":true }, - "Prefix":{"type":"string"}, + "Prefix":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, "ProcessingConfiguration":{ "type":"structure", "members":{ @@ -1563,7 +1631,8 @@ "ProcessorParameterValue":{ "type":"string", "max":512, - "min":1 + "min":1, + "pattern":"^(?!\\s*$).+" }, "ProcessorType":{ "type":"string", @@ -2059,7 +2128,13 @@ "documentation":"

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

" } }, - "documentation":"

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3.

" + "documentation":"

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" + }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"NonEmptyStringWithoutWhitespace"}, + "max":5, + "min":1 }, "Serializer":{ "type":"structure", @@ -2290,6 +2365,12 @@ "members":{ } }, + "SubnetIdList":{ + "type":"list", + "member":{"shape":"NonEmptyStringWithoutWhitespace"}, + "max":16, + "min":1 + }, "Tag":{ "type":"structure", "required":["Key"], @@ -2336,7 +2417,8 @@ "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"^(?!aws:)[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$" }, "TagKeyList":{ "type":"list", @@ -2347,7 +2429,8 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$" }, "Timestamp":{"type":"timestamp"}, "UntagDeliveryStreamInput":{ @@ -2422,8 +2505,61 @@ }, "Username":{ "type":"string", + "max":512, "min":1, + "pattern":".*", "sensitive":true + }, + "VpcConfiguration":{ + "type":"structure", + "required":[ + "SubnetIds", + "RoleARN", + "SecurityGroupIds" + ], + "members":{ + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + }, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination.

" + } + }, + "documentation":"

The details of the VPC of the Amazon ES destination.

" + }, + "VpcConfigurationDescription":{ + "type":"structure", + "required":[ + "SubnetIds", + "RoleARN", + "SecurityGroupIds", + "VpcId" + ], + "members":{ + "SubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + }, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination.

" + }, + "VpcId":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

The ID of the Amazon ES destination's VPC.

" + } + }, + "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, "documentation":"Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk.

" diff --git a/services/fms/pom.xml b/services/fms/pom.xml index e04681dde95f..2112deeda1e5 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/service-2.json b/services/fms/src/main/resources/codegen-resources/service-2.json index 0d894abebcd4..dc81eb328534 100644 --- a/services/fms/src/main/resources/codegen-resources/service-2.json +++ b/services/fms/src/main/resources/codegen-resources/service-2.json @@ -235,7 +235,7 @@ {"shape":"InternalErrorException"}, {"shape":"InvalidTypeException"} ], - "documentation":"

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

  • A Shield Advanced policy, which applies Shield Advanced protection to specified accounts and resources

  • An AWS WAF policy, which contains a rule group and defines which resources are to be protected by that rule group

  • A security group policy, which manages VPC security groups across your AWS organization.

Each policy is specific to one of the three types. If you want to enforce more than one policy type across accounts, you can create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

" + "documentation":"

Creates an AWS Firewall Manager policy.

Firewall Manager provides the following types of policies:

  • A Shield Advanced policy, which applies Shield Advanced protection to specified accounts and resources

  • An AWS WAF policy (type WAFV2), which defines rule groups to run first in the corresponding AWS WAF web ACL and rule groups to run last in the web ACL.

  • An AWS WAF Classic policy (type WAF), which defines a rule group.

  • A security group policy, which manages VPC security groups across your AWS organization.

Each policy is specific to one of the types. If you want to enforce more than one policy type across accounts, create multiple policies. You can create multiple policies for each type.

You must be subscribed to Shield Advanced to create a Shield Advanced policy. For more information about subscribing to Shield Advanced, see CreateSubscription.

" }, "TagResource":{ "name":"TagResource", @@ -333,7 +333,10 @@ }, "CustomerPolicyScopeIdType":{ "type":"string", - "enum":["ACCOUNT"] + "enum":[ + "ACCOUNT", + "ORG_UNIT" + ] }, "CustomerPolicyScopeMap":{ "type":"map", @@ -754,11 +757,11 @@ }, "IncludeMap":{ "shape":"CustomerPolicyScopeMap", - "documentation":"

Specifies the AWS account IDs to include in the policy. If IncludeMap is null, all accounts in the organization in AWS Organizations are included in the policy. If IncludeMap is not null, only values listed in IncludeMap are included in the policy.

The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + "documentation":"

Specifies the AWS account IDs and AWS Organizations organizational units (OUs) to include in the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.

You can specify inclusions or exclusions, but not both. If you specify an IncludeMap, AWS Firewall Manager applies the policy to all accounts specified by the IncludeMap, and does not evaluate any ExcludeMap specifications. If you do not specify an IncludeMap, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap.

You can specify account IDs, OUs, or a combination:

  • Specify account IDs by setting the key to ACCOUNT. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}.

  • Specify OUs by setting the key to ORG_UNIT. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}.

  • Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}.

" }, "ExcludeMap":{ "shape":"CustomerPolicyScopeMap", - "documentation":"

Specifies the AWS account IDs to exclude from the policy. The IncludeMap values are evaluated first, with all the appropriate account IDs added to the policy. Then the accounts listed in ExcludeMap are removed, resulting in the final list of accounts to add to the policy.

The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” : [“accountID1”, “accountID2”]}.

" + "documentation":"

Specifies the AWS account IDs and AWS Organizations organizational units (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.

You can specify inclusions or exclusions, but not both. If you specify an IncludeMap, AWS Firewall Manager applies the policy to all accounts specified by the IncludeMap, and does not evaluate any ExcludeMap specifications. If you do not specify an IncludeMap, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap.

You can specify account IDs, OUs, or a combination:

  • Specify account IDs by setting the key to ACCOUNT. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}.

  • Specify OUs by setting the key to ORG_UNIT. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}.

  • Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}.

" } }, "documentation":"

An AWS Firewall Manager policy.

" @@ -1015,7 +1018,7 @@ }, "ManagedServiceData":{ "shape":"ManagedServiceData", - "documentation":"

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

  • Example: WAFV2

    \"SecurityServicePolicyData\": \"{ \\\"type\\\": \\\"WAFV2\\\", \\\"postProcessRuleGroups\\\": [ { \\\"managedRuleGroupIdentifier\\\": { \\\"managedRuleGroupName\\\": \\\"AWSManagedRulesAdminProtectionRuleSet\\\", \\\"vendor\\\": \\\"AWS\\\" } \\\"ruleGroupARN\\\": \\\"rule group arn\", \\\"overrideAction\\\": { \\\"type\\\": \\\"COUNT|\\\" }, \\\"excludedRules\\\": [ { \\\"name\\\" : \\\"EntityName\\\" } ], \\\"type\\\": \\\"ManagedRuleGroup|RuleGroup\\\" } ], \\\"preProcessRuleGroups\\\": [ { \\\"managedRuleGroupIdentifier\\\": { \\\"managedRuleGroupName\\\": \\\"AWSManagedRulesAdminProtectionRuleSet\\\", \\\"vendor\\\": \\\"AWS\\\" } \\\"ruleGroupARN\\\": \\\"rule group arn\\\", \\\"overrideAction\\\": { \\\"type\\\": \\\"COUNT\\\" }, \\\"excludedRules\\\": [ { \\\"name\\\" : \\\"EntityName\\\" } ], \\\"type\\\": \\\"ManagedRuleGroup|RuleGroup\\\" } ], \\\"defaultAction\\\": { \\\"type\\\": \\\"BLOCK\\\" }}\"

  • Example: WAF

    \"ManagedServiceData\": \"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\": \\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}

  • Example: SECURITY_GROUPS_COMMON

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_COMMON\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

  • Example: SECURITY_GROUPS_CONTENT_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd \\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

    The security group action for content audit can be ALLOW or DENY. For ALLOW, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.

  • Example: SECURITY_GROUPS_USAGE_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_USAGE_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"},\"RemediationEnabled\":false,\"Resou rceType\":\"AWS::EC2::SecurityGroup\"}

" + "documentation":"

Details about the service that are specific to the service type, in JSON format. For service type SHIELD_ADVANCED, this is an empty string.

  • Example: WAFV2

    \"ManagedServiceData\": \"{\\\"type\\\":\\\"WAFV2\\\",\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"preProcessRuleGroups\\\":[{\\\"managedRuleGroupIdentifier\\\":null,\\\"ruleGroupArn\\\":\\\"rulegrouparn\\\",\\\"overrideAction\\\":{\\\"type\\\":\\\"COUNT\\\"},\\\"excludedRules\\\":[{\\\"name\\\":\\\"EntityName\\\"}],\\\"ruleGroupType\\\":\\\"RuleGroup\\\"}],\\\"postProcessRuleGroups\\\":[{\\\"managedRuleGroupIdentifier\\\":{\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAdminProtectionRuleSet\\\",\\\"vendor\\\":\\\"AWS\\\"},\\\"ruleGroupArn\\\":\\\"rulegrouparn\\\",\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"excludedRules\\\":[],\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\"}],\\\"overrideCustomerWebACLAssociation\\\":false}\"

  • Example: WAF Classic

    \"ManagedServiceData\": \"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\": \\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}

  • Example: SECURITY_GROUPS_COMMON

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_COMMON\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

  • Example: SECURITY_GROUPS_CONTENT_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd \\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"},\"RemediationEnabled\":false,\"ResourceType\":\"AWS::EC2::NetworkInterface\"}

    The security group action for content audit can be ALLOW or DENY. For ALLOW, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.

  • Example: SECURITY_GROUPS_USAGE_AUDIT

    \"SecurityServicePolicyData\":{\"Type\":\"SECURITY_GROUPS_USAGE_AUDIT\",\"ManagedServiceData\":\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"},\"RemediationEnabled\":false,\"Resou rceType\":\"AWS::EC2::SecurityGroup\"}

" } }, "documentation":"

Details about the security service that is being used to protect the resources.

" diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index 57c786bbf44f..33aebd0d4188 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecast/src/main/resources/codegen-resources/service-2.json b/services/forecast/src/main/resources/codegen-resources/service-2.json index 0290ea5eb381..9537a6b4fab9 100644 --- a/services/forecast/src/main/resources/codegen-resources/service-2.json +++ b/services/forecast/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

  • DataFrequency - How frequently your historical time-series data is collected.

  • Domain and DatasetType - Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.

  • Schema - A schema specifies the fields in the dataset, including the field name and data type.

After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets, use the ListDatasets operation.

For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.

The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.

" + "documentation":"

Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:

  • DataFrequency - How frequently your historical time-series data is collected.

  • Domain and DatasetType - Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.

  • Schema - A schema specifies the fields in the dataset, including the field name and data type.

After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.

To get a list of all your datasets, use the ListDatasets operation.

For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.

The Status of a dataset must be ACTIVE before you can import training data. Use the DescribeDataset operation to get the status.

" }, "CreateDatasetGroup":{ "name":"CreateDatasetGroup", @@ -60,7 +60,7 @@ {"shape":"ResourceInUseException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. For more information, see aws-forecast-iam-roles.

The training data must be in CSV format. The delimiter must be a comma (,).

You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.

To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.

" + "documentation":"

Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.

You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see aws-forecast-iam-roles.

The training data must be in CSV format. The delimiter must be a comma (,).

You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.

Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.

To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.

" }, "CreateForecast":{ "name":"CreateForecast", @@ -77,7 +77,7 @@ {"shape":"ResourceInUseException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.

The range of the forecast is determined by the ForecastHorizon value, which you specify in the CreatePredictor request, multiplied by the DataFrequency value, which you specify in the CreateDataset request. When you query a forecast, you can request a specific date range within the forecast.

To get a list of all your forecasts, use the ListForecasts operation.

The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.

For more information, see howitworks-forecast.

The Status of the forecast must be ACTIVE before you can query or export the forecast. Use the DescribeForecast operation to get the status.

" + "documentation":"

Creates a forecast for each item in the TARGET_TIME_SERIES dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.

The range of the forecast is determined by the ForecastHorizon value, which you specify in the CreatePredictor request. When you query a forecast, you can request a specific date range within the forecast.

To get a list of all your forecasts, use the ListForecasts operation.

The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.

For more information, see howitworks-forecast.

The Status of the forecast must be ACTIVE before you can query or export the forecast. Use the DescribeForecast operation to get the status.

" }, "CreateForecastExportJob":{ "name":"CreateForecastExportJob", @@ -94,7 +94,7 @@ {"shape":"ResourceInUseException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:

<ForecastExportJobName>_<ExportTimestamp>_<PageNumber>

where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).

You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

For more information, see howitworks-forecast.

To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.

" + "documentation":"

Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:

<ForecastExportJobName>_<ExportTimestamp>_<PartNumber>

where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).

You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.

For more information, see howitworks-forecast.

To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.

The Status of the forecast export job must be ACTIVE before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.

" }, "CreatePredictor":{ "name":"CreatePredictor", @@ -125,7 +125,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. To get the status use the DescribeDataset operation.

", + "documentation":"

Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. To get the status use the DescribeDataset operation.

Forecast does not automatically update any dataset groups that contain the deleted dataset. In order to update the dataset group, use the operation, omitting the deleted dataset's ARN.

", "idempotent":true }, "DeleteDatasetGroup":{ @@ -397,6 +397,49 @@ "documentation":"

Returns a list of predictors created using the CreatePredictor operation. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribePredictor operation. You can filter the list using an array of Filter objects.

", "idempotent":true }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Lists the tags for an Amazon Forecast resource.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes the specified tags from a resource.

" + }, "UpdateDatasetGroup":{ "name":"UpdateDatasetGroup", "http":{ @@ -509,6 +552,10 @@ "DatasetArns":{ "shape":"ArnList", "documentation":"

An array of Amazon Resource Names (ARNs) of the datasets that you want to include in the dataset group.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the dataset group to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -544,6 +591,10 @@ "TimestampFormat":{ "shape":"TimestampFormat", "documentation":"

The format of timestamps in the dataset. The format that you specify depends on the DataFrequency specified when the dataset was created. The following formats are supported

  • \"yyyy-MM-dd\"

    For the following data frequencies: Y, M, W, and D

  • \"yyyy-MM-dd HH:mm:ss\"

    For the following data frequencies: H, 30min, 15min, and 1min; and optionally, for: Y, M, W, and D

If the format isn't specified, Amazon Forecast expects the format to be \"yyyy-MM-dd HH:mm:ss\".

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the dataset import job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -588,6 +639,10 @@ "EncryptionConfig":{ "shape":"EncryptionConfig", "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the dataset to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -619,6 +674,10 @@ "Destination":{ "shape":"DataDestination", "documentation":"

The location where you want to save the forecast and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the location. The forecast must be exported to an Amazon S3 bucket.

If encryption is used, Destination must include an AWS Key Management Service (KMS) key. The IAM role must allow Amazon Forecast permission to access the key.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the forecast export job to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -648,7 +707,11 @@ }, "ForecastTypes":{ "shape":"ForecastTypes", - "documentation":"

The quantiles at which probabilistic forecasts are generated. You can specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments of .01 only) and mean. The mean forecast is different from the median (0.50) when the distribution is not symmetric (e.g. Beta, Negative Binomial). The default value is [\"0.1\", \"0.5\", \"0.9\"].

" + "documentation":"

The quantiles at which probabilistic forecasts are generated. You can currently specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments of .01 only) and mean. The mean forecast is different from the median (0.50) when the distribution is not symmetric (for example, Beta and Negative Binomial). The default value is [\"0.1\", \"0.5\", \"0.9\"].

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the forecast to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -713,6 +776,10 @@ "EncryptionConfig":{ "shape":"EncryptionConfig", "documentation":"

An AWS Key Management Service (KMS) key and the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the key.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The optional metadata that you apply to the predictor to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" } } }, @@ -1140,7 +1207,7 @@ }, "ForecastTypes":{ "shape":"ForecastTypes", - "documentation":"

The quantiles at which proababilistic forecasts were generated.

" + "documentation":"

The quantiles at which probabilistic forecasts were generated.

" }, "PredictorArn":{ "shape":"Arn", @@ -1332,7 +1399,7 @@ "members":{ "AttributeName":{ "shape":"Name", - "documentation":"

The name of the schema attribute that specifies the data field to be featurized. Only the target field of the TARGET_TIME_SERIES dataset type is supported. For example, for the RETAIL domain, the target is demand, and for the CUSTOM domain, the target is target_value.

" + "documentation":"

The name of the schema attribute that specifies the data field to be featurized. Amazon Forecast supports the target field of the TARGET_TIME_SERIES and the RELATED_TIME_SERIES datasets. For example, for the RETAIL domain, the target is demand, and for the CUSTOM domain, the target is target_value. For more information, see howitworks-missing-values.

" }, "FeaturizationPipeline":{ "shape":"FeaturizationPipeline", @@ -1355,10 +1422,10 @@ }, "Featurizations":{ "shape":"Featurizations", - "documentation":"

An array of featurization (transformation) information for the fields of a dataset. Only a single featurization is supported.

" + "documentation":"

An array of featurization (transformation) information for the fields of a dataset.

" } }, - "documentation":"

In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.

You define featurization using the FeaturizationConfig object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig object in your CreatePredictor request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES dataset before model training.

You can create multiple featurization configurations. For example, you might call the CreatePredictor operation twice by specifying different featurization configurations.

" + "documentation":"

In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.

You define featurization using the FeaturizationConfig object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig object in your CreatePredictor request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES and RELATED_TIME_SERIES datasets before model training.

You can create multiple featurization configurations. For example, you might call the CreatePredictor operation twice by specifying different featurization configurations.

" }, "FeaturizationMethod":{ "type":"structure", @@ -1370,10 +1437,10 @@ }, "FeaturizationMethodParameters":{ "shape":"FeaturizationMethodParameters", - "documentation":"

The method parameters (key-value pairs). Specify these parameters to override the default values. The following list shows the parameters and their valid values. Bold signifies the default value.

  • aggregation: sum, avg, first, min, max

  • frontfill: none

  • middlefill: zero, nan (not a number)

  • backfill: zero, nan

" + "documentation":"

The method parameters (key-value pairs), which are a map of override parameters. Specify these parameters to override the default values. Related Time Series attributes do not accept aggregation parameters.

The following list shows the parameters and their valid values for the \"filling\" featurization method for a Target Time Series dataset. Bold signifies the default value.

  • aggregation: sum, avg, first, min, max

  • frontfill: none

  • middlefill: zero, nan (not a number), value, median, mean, min, max

  • backfill: zero, nan, value, median, mean, min, max

The following list shows the parameters and their valid values for a Related Time Series featurization method (there are no defaults):

  • middlefill: zero, value, median, mean, min, max

  • backfill: zero, value, median, mean, min, max

  • futurefill: zero, value, median, mean, min, max

" } }, - "documentation":"

Provides information about the method that featurizes (transforms) a dataset field. The method is part of the FeaturizationPipeline of the Featurization object. If you don't specify FeaturizationMethodParameters, Amazon Forecast uses default parameters.

The following is an example of how you specify a FeaturizationMethod object.

{

\"FeaturizationMethodName\": \"filling\",

\"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}

}

" + "documentation":"

Provides information about the method that featurizes (transforms) a dataset field. The method is part of the FeaturizationPipeline of the Featurization object.

The following is an example of how you specify a FeaturizationMethod object.

{

\"FeaturizationMethodName\": \"filling\",

\"FeaturizationMethodParameters\": {\"aggregation\": \"sum\", \"middlefill\": \"zero\", \"backfill\": \"zero\"}

}

" }, "FeaturizationMethodName":{ "type":"string", @@ -1395,7 +1462,7 @@ "Featurizations":{ "type":"list", "member":{"shape":"Featurization"}, - "max":1, + "max":50, "min":1 }, "FieldStatistics":{ @@ -1816,6 +1883,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

The tags for the resource.

" + } + } + }, "MaxResults":{ "type":"integer", "max":100, @@ -2032,7 +2118,9 @@ }, "SchemaAttributes":{ "type":"list", - "member":{"shape":"SchemaAttribute"} + "member":{"shape":"SchemaAttribute"}, + "max":100, + "min":1 }, "Statistics":{ "type":"structure", @@ -2094,10 +2182,10 @@ }, "Value":{ "shape":"Value", - "documentation":"

One of the following 2 letter country codes:

  • \"AU\" - AUSTRALIA

  • \"DE\" - GERMANY

  • \"JP\" - JAPAN

  • \"US\" - UNITED_STATES

  • \"UK\" - UNITED_KINGDOM

" + "documentation":"

One of the following 2 letter country codes:

  • \"AR\" - ARGENTINA

  • \"AT\" - AUSTRIA

  • \"AU\" - AUSTRALIA

  • \"BE\" - BELGIUM

  • \"BR\" - BRAZIL

  • \"CA\" - CANADA

  • \"CN\" - CHINA

  • \"CZ\" - CZECH REPUBLIC

  • \"DK\" - DENMARK

  • \"EC\" - ECUADOR

  • \"FI\" - FINLAND

  • \"FR\" - FRANCE

  • \"DE\" - GERMANY

  • \"HU\" - HUNGARY

  • \"IE\" - IRELAND

  • \"IN\" - INDIA

  • \"IT\" - ITALY

  • \"JP\" - JAPAN

  • \"KR\" - KOREA

  • \"LU\" - LUXEMBOURG

  • \"MX\" - MEXICO

  • \"NL\" - NETHERLANDS

  • \"NO\" - NORWAY

  • \"PL\" - POLAND

  • \"PT\" - PORTUGAL

  • \"RU\" - RUSSIA

  • \"ZA\" - SOUTH AFRICA

  • \"ES\" - SPAIN

  • \"SE\" - SWEDEN

  • \"CH\" - SWITZERLAND

  • \"US\" - UNITED STATES

  • \"UK\" - UNITED KINGDOM

" } }, - "documentation":"

Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object.

The only supported feature is a holiday calendar. If you use the calendar, all data in the datasets should belong to the same country as the calendar. For the holiday calendar data, see the Jollyday web site.

" + "documentation":"

Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object.

The only supported feature is a holiday calendar. If you use the calendar, all data in the datasets should belong to the same country as the calendar. For the holiday calendar data, see the Jollyday web site.

India and Korea's holidays are not included in the Jollyday library, but both are supported by Amazon Forecast. Their holidays are:

\"IN\" - INDIA

  • JANUARY 26 - REPUBLIC DAY

  • AUGUST 15 - INDEPENDENCE DAY

  • OCTOBER 2 - GANDHI'S BIRTHDAY

\"KR\" - KOREA

  • JANUARY 1 - NEW YEAR

  • MARCH 1 - INDEPENDENCE MOVEMENT DAY

  • MAY 5 - CHILDREN'S DAY

  • JUNE 6 - MEMORIAL DAY

  • AUGUST 15 - LIBERATION DAY

  • OCTOBER 3 - NATIONAL FOUNDATION DAY

  • OCTOBER 9 - HANGEUL DAY

  • DECEMBER 25 - CHRISTMAS DAY

" }, "SupplementaryFeatures":{ "type":"list", @@ -2105,6 +2193,70 @@ "max":1, "min":1 }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

One part of a key-value pair that makes up a tag. A key is a general label that acts like a category for more specific tag values.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The optional part of a key-value pair that makes up a tag. A value acts as a descriptor within a tag category (key).

" + } + }, + "documentation":"

The optional metadata that you apply to a resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The tags to add to the resource. A tag is an array of key-value pairs.

The following basic restrictions apply to tags:

  • Maximum number of tags per resource - 50.

  • For each resource, each tag key must be unique, and each tag key can have only one value.

  • Maximum key length - 128 Unicode characters in UTF-8.

  • Maximum value length - 256 Unicode characters in UTF-8.

  • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

  • Tag keys and values are case sensitive.

  • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Forecast considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "Tags":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, "TestWindowDetails":{ "type":"list", "member":{"shape":"TestWindowSummary"} @@ -2148,6 +2300,28 @@ "max":100, "min":0 }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast exports.

" + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

The keys of the tags to be removed.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateDatasetGroupRequest":{ "type":"structure", "required":[ diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 674550f3f790..e0cae97c2962 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 4424098c873b..316f0e1f7fc7 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/frauddetector/src/main/resources/codegen-resources/service-2.json b/services/frauddetector/src/main/resources/codegen-resources/service-2.json index 225f69e1a344..5c40fb61c0c5 100644 --- a/services/frauddetector/src/main/resources/codegen-resources/service-2.json +++ b/services/frauddetector/src/main/resources/codegen-resources/service-2.json @@ -68,6 +68,7 @@ "output":{"shape":"CreateModelVersionResult"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], @@ -103,6 +104,22 @@ ], "documentation":"

Creates a variable.

" }, + "DeleteDetector":{ + "name":"DeleteDetector", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDetectorRequest"}, + "output":{"shape":"DeleteDetectorResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes the detector. Before deleting a detector, you must first delete all detector versions and rule versions associated with the detector.

" + }, "DeleteDetectorVersion":{ "name":"DeleteDetectorVersion", "http":{ @@ -115,9 +132,10 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} ], - "documentation":"

Deletes the detector version.

" + "documentation":"

Deletes the detector version. You cannot delete detector versions that are in ACTIVE status.

" }, "DeleteEvent":{ "name":"DeleteEvent", @@ -133,6 +151,22 @@ ], "documentation":"

Deletes the specified event.

" }, + "DeleteRuleVersion":{ + "name":"DeleteRuleVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRuleVersionRequest"}, + "output":{"shape":"DeleteRuleVersionResult"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes the rule version. You cannot delete a rule version if it is used by an ACTIVE or INACTIVE detector version.

" + }, "DescribeDetector":{ "name":"DescribeDetector", "http":{ @@ -568,6 +602,15 @@ } } }, + "ConflictException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"string"} + }, + "documentation":"

An exception indicating there was a conflict during a delete operation. The following delete operations can cause a conflict exception:

  • DeleteDetector: A conflict exception will occur if the detector has associated Rules or DetectorVersions. You can only delete a detector if it has no Rules or DetectorVersions.

  • DeleteDetectorVersion: A conflict exception will occur if the DetectorVersion status is ACTIVE.

  • DeleteRuleVersion: A conflict exception will occur if the RuleVersion is in use by an associated ACTIVE or INACTIVE DetectorVersion.

", + "exception":true + }, "CreateDetectorVersionRequest":{ "type":"structure", "required":[ @@ -594,6 +637,10 @@ "modelVersions":{ "shape":"ListOfModelVersions", "documentation":"

The model versions to include in the detector version.

" + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

The rule execution mode for the rules included in the detector version.

You can define and edit the rule mode at the detector version level, when it is in draft status.

If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

If you specify ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules.

The default behavior is FIRST_MATCHED.

" } } }, @@ -763,6 +810,21 @@ "BOOLEAN" ] }, + "DeleteDetectorRequest":{ + "type":"structure", + "required":["detectorId"], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

The ID of the detector to delete.

" + } + } + }, + "DeleteDetectorResult":{ + "type":"structure", + "members":{ + } + }, "DeleteDetectorVersionRequest":{ "type":"structure", "required":[ @@ -800,6 +862,33 @@ "members":{ } }, + "DeleteRuleVersionRequest":{ + "type":"structure", + "required":[ + "detectorId", + "ruleId", + "ruleVersion" + ], + "members":{ + "detectorId":{ + "shape":"identifier", + "documentation":"

The ID of the detector that includes the rule version to delete.

" + }, + "ruleId":{ + "shape":"identifier", + "documentation":"

The rule ID of the rule version to delete.

" + }, + "ruleVersion":{ + "shape":"nonEmptyString", + "documentation":"

The rule version to delete.

" + } + } + }, + "DeleteRuleVersionResult":{ + "type":"structure", + "members":{ + } + }, "DescribeDetectorRequest":{ "type":"structure", "required":["detectorId"], @@ -1059,6 +1148,10 @@ "createdTime":{ "shape":"time", "documentation":"

The timestamp when the detector version was created.

" + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

The execution mode of the rule in the detector.

FIRST_MATCHED indicates that Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

ALL_MATCHED indicates that Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. You can define and edit the rule mode at the detector version level, when it is in draft status.

" } } }, @@ -1272,6 +1365,10 @@ "modelScores":{ "shape":"ListOfModelScores", "documentation":"

The model scores for models used in the detector version.

" + }, + "ruleResults":{ + "shape":"ListOfRuleResults", + "documentation":"

The rule results in the prediction.

" } } }, @@ -1395,6 +1492,10 @@ "type":"list", "member":{"shape":"ModelVersion"} }, + "ListOfRuleResults":{ + "type":"list", + "member":{"shape":"RuleResult"} + }, "ListOfStrings":{ "type":"list", "member":{"shape":"string"} @@ -1930,10 +2031,31 @@ "type":"list", "member":{"shape":"RuleDetail"} }, + "RuleExecutionMode":{ + "type":"string", + "enum":[ + "ALL_MATCHED", + "FIRST_MATCHED" + ] + }, "RuleList":{ "type":"list", "member":{"shape":"Rule"} }, + "RuleResult":{ + "type":"structure", + "members":{ + "ruleId":{ + "shape":"string", + "documentation":"

The rule ID that was matched, based on the rule execution mode.

" + }, + "outcomes":{ + "shape":"ListOfStrings", + "documentation":"

The outcomes of the matched rule, based on the rule execution mode.

" + } + }, + "documentation":"

The rule results.

" + }, "RulesMaxResults":{ "type":"integer", "box":true, @@ -2026,6 +2148,10 @@ "modelVersions":{ "shape":"ListOfModelVersions", "documentation":"

The model versions to include in the detector version.

" + }, + "ruleExecutionMode":{ + "shape":"RuleExecutionMode", + "documentation":"

The rule execution mode to add to the detector.

If you specify FIRST_MATCHED, Amazon Fraud Detector evaluates rules sequentially, first to last, stopping at the first matched rule. Amazon Fraud Detector then provides the outcomes for that single rule.

If you specify ALL_MATCHED, Amazon Fraud Detector evaluates all rules and returns the outcomes for all matched rules. You can define and edit the rule mode at the detector version level, when it is in draft status.

The default behavior is FIRST_MATCHED.

" } } }, @@ -2329,7 +2455,7 @@ "type":"string", "max":512, "min":1, - "pattern":"^s3:\\/\\/[^\\s]+$" + "pattern":"^s3:\\/\\/(.+)$" }, "string":{"type":"string"}, "time":{"type":"string"} diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 09830395bbde..1ed8664f81c2 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index ced5ee83164f..f47135715641 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -48,7 +48,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a backup of an existing Amazon FSx for Windows File Server file system. Creating regular backups for your file system is a best practice that complements the replication that Amazon FSx for Windows File Server performs for your file system. It also enables you to restore from user modification of data.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

  • Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the backup.

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateFileSystem operation returns while the backup's lifecycle state is still CREATING. You can check the file system creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", + "documentation":"

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

  • a Persistent deployment type

  • is not linked to an Amazon S3 data repository.

For more information, see https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-backups.html.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

  • Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the backup.

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -106,12 +106,13 @@ {"shape":"ActiveDirectoryError"}, {"shape":"IncompatibleParameterError"}, {"shape":"InvalidNetworkSettings"}, + {"shape":"InvalidPerUnitStorageThroughput"}, {"shape":"ServiceLimitExceeded"}, {"shape":"BackupNotFound"}, {"shape":"InternalServerError"}, {"shape":"MissingFileSystemConfiguration"} ], - "documentation":"

Creates a new Amazon FSx file system from an existing Amazon FSx for Windows File Server backup.

If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

  • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system.

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" + "documentation":"

Creates a new Amazon FSx file system from an existing Amazon FSx backup.

If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

  • Creates a new Amazon FSx file system from backup with an assigned ID, and an initial lifecycle state of CREATING.

  • Returns the description of the file system.

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" }, "DeleteBackup":{ "name":"DeleteBackup", @@ -129,7 +130,7 @@ {"shape":"IncompatibleParameterError"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes an Amazon FSx for Windows File Server backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.

The DeleteBackup call returns instantly. The backup will not show up in later DescribeBackups calls.

The data in a deleted backup is also deleted and can't be recovered by any means.

", + "documentation":"

Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.

The DeleteBackup call returns instantly. The backup will not show up in later DescribeBackups calls.

The data in a deleted backup is also deleted and can't be recovered by any means.

", "idempotent":true }, "DeleteFileSystem":{ @@ -164,7 +165,7 @@ {"shape":"BackupNotFound"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the description of specific Amazon FSx for Windows File Server backups, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextTokenvalue. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

  • The implementation might return fewer than MaxResults file system descriptions while still including a NextToken value.

  • The order of backups returned in the response of one DescribeBackups call and the order of backups returned across the responses of a multi-call iteration is unspecified.

" + "documentation":"

Returns the description of specific Amazon FSx backups, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

  • The implementation might return fewer than MaxResults file system descriptions while still including a NextToken value.

  • The order of backups returned in the response of one DescribeBackups call and the order of backups returned across the responses of a multi-call iteration is unspecified.

" }, "DescribeDataRepositoryTasks":{ "name":"DescribeDataRepositoryTasks", @@ -264,9 +265,10 @@ {"shape":"IncompatibleParameterError"}, {"shape":"InternalServerError"}, {"shape":"FileSystemNotFound"}, - {"shape":"MissingFileSystemConfiguration"} + {"shape":"MissingFileSystemConfiguration"}, + {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Updates a file system configuration.

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. For an Amazon FSx for Lustre file system, you can update only the WeeklyMaintenanceStartTime. For an Amazon FSx for Windows File Server file system, you can update the following properties:

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

You can update multiple properties in a single request.

" } }, "shapes":{ @@ -322,17 +324,64 @@ "type":"string", "max":255, "min":1, - "pattern":"^.{1,255}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$" + }, + "AdministrativeAction":{ + "type":"structure", + "members":{ + "AdministrativeActionType":{"shape":"AdministrativeActionType"}, + "ProgressPercent":{ + "shape":"ProgressPercent", + "documentation":"

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action.

" + }, + "RequestTime":{ + "shape":"RequestTime", + "documentation":"

Time that the administrative action request was received.

" + }, + "Status":{ + "shape":"Status", + "documentation":"

Describes the status of the administrative action, as follows:

  • FAILED - Amazon FSx failed to process the administrative action successfully.

  • IN_PROGRESS - Amazon FSx is processing the administrative action.

  • PENDING - Amazon FSx is waiting to process the administrative action.

  • COMPLETED - Amazon FSx has finished processing the administrative task.

  • UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage optimization process. For more information, see Managing Storage Capacity.

" + }, + "TargetFileSystemValues":{ + "shape":"FileSystem", + "documentation":"

Describes the target StorageCapacity or ThroughputCapacity value provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

" + }, + "FailureDetails":{"shape":"AdministrativeActionFailureDetails"} + }, + "documentation":"

Describes a specific Amazon FSx Administrative Action for the current Windows file system.

" + }, + "AdministrativeActionFailureDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"ErrorMessage", + "documentation":"

Error message providing details about the failure.

" + } + }, + "documentation":"

Provides information about a failed administrative action.

" + }, + "AdministrativeActionType":{ + "type":"string", + "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION task starts. Storage optimization is the process of migrating the file system data to the new, larger disks. You can track the storage migration progress using the ProgressPercent property. When STORAGE_OPTIMIZATION completes successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing Storage Capacity.

", + "enum":[ + "FILE_SYSTEM_UPDATE", + "STORAGE_OPTIMIZATION" + ] + }, + "AdministrativeActions":{ + "type":"list", + "member":{"shape":"AdministrativeAction"}, + "max":50 }, "ArchivePath":{ "type":"string", "max":900, "min":3, - "pattern":"^.{3,900}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$" }, "AutomaticBackupRetentionDays":{ "type":"integer", - "documentation":"

The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 35 days.

", + "documentation":"

The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 35 days. The default is 0.

", "max":35, "min":0 }, @@ -388,7 +437,7 @@ "documentation":"

The configuration of the self-managed Microsoft Active Directory (AD) to which the Windows File Server instance is joined.

" } }, - "documentation":"

A backup of an Amazon FSx for Windows File Server file system. You can create a new file system from a backup to protect against data loss.

" + "documentation":"

A backup of an Amazon FSx file system.

" }, "BackupFailureDetails":{ "type":"structure", @@ -537,12 +586,12 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "idempotencyToken":true }, "Tags":{ "shape":"Tags", - "documentation":"

The tags to apply to the backup at backup creation. The key value of the Name tag appears in the console as the backup name.

" + "documentation":"

The tags to apply to the backup at backup creation. The key value of the Name tag appears in the console as the backup name. If you have set CopyTagsToBackups to true, and you specify one or more tags using the CreateBackup action, no existing tags on the file system are copied from the file system to the backup.

" } }, "documentation":"

The request object for the CreateBackup operation.

" @@ -604,7 +653,7 @@ "BackupId":{"shape":"BackupId"}, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "idempotencyToken":true }, "SubnetIds":{ @@ -623,6 +672,7 @@ "shape":"CreateFileSystemWindowsConfiguration", "documentation":"

The configuration for this Microsoft Windows file system.

" }, + "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, "StorageType":{ "shape":"StorageType", "documentation":"

Sets the storage type for the Windows file system you're creating from a backup. Valid values are SSD and HDD.

  • Set to SSD to use solid state drive storage. Supported on all Windows deployment types.

  • Set to HDD to use hard disk drive storage. Supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types.

Default value is SSD.

HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage only if the original SSD file system had a storage capacity of at least 2000 GiB.

" @@ -645,7 +695,7 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "ImportPath":{ "shape":"ArchivePath", @@ -661,11 +711,17 @@ }, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.

Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support this feature. (Default = SCRATCH_1)

Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when accessed from supported instance types in supported AWS Regions. To learn more, Encrypting Data in Transit.

" + "documentation":"

Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

This option can only be set for PERSISTENT_1 deployment types.

Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.

Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support this feature. (Default = SCRATCH_1)

Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when accessed from supported instance types in supported AWS Regions. To learn more, Encrypting Data in Transit.

" }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", "documentation":"

Required for the PERSISTENT_1 deployment type, describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput. You pay for the amount of throughput that you provision.

Valid values are 50, 100, 200.

" + }, + "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, + "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, + "CopyTagsToBackups":{ + "shape":"Flag", + "documentation":"

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

" } }, "documentation":"

The Lustre configuration for the file system being created.

" @@ -680,7 +736,7 @@ "members":{ "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "idempotencyToken":true }, "FileSystemType":{ @@ -693,7 +749,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

Sets the storage type for the Amazon FSx for Windows file system you're creating. Valid values are SSD and HDD.

  • Set to SSD to use solid state drive storage. SSD is supported on all Windows deployment types.

  • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types.

Default value is SSD. For more information, see Storage Type Options in the Amazon FSx for Windows User Guide.

" + "documentation":"

Sets the storage type for the Amazon FSx for Windows file system you're creating. Valid values are SSD and HDD.

  • Set to SSD to use solid state drive storage. SSD is supported on all Windows deployment types.

  • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types.

Default value is SSD. For more information, see Storage Type Options in the Amazon FSx for Windows User Guide.

" }, "SubnetIds":{ "shape":"SubnetIds", @@ -749,7 +805,7 @@ }, "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", @@ -935,7 +991,7 @@ "type":"string", "max":4096, "min":0, - "pattern":"^.{0,4096}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{0,4096}$" }, "DataRepositoryTaskPaths":{ "type":"list", @@ -983,7 +1039,7 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", "idempotencyToken":true } }, @@ -1003,6 +1059,34 @@ }, "documentation":"

The response object for DeleteBackup operation.

" }, + "DeleteFileSystemLustreConfiguration":{ + "type":"structure", + "members":{ + "SkipFinalBackup":{ + "shape":"Flag", + "documentation":"

Set SkipFinalBackup to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the DeleteFileSystem operation is invoked. (Default = true)

" + }, + "FinalBackupTags":{ + "shape":"Tags", + "documentation":"

Use if SkipFinalBackup is set to false, and you want to apply an array of tags to the final backup. If you have set the file system property CopyTagsToBackups to true, and you specify one or more FinalBackupTags when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.

" + } + }, + "documentation":"

The configuration object for the Amazon FSx for Lustre file system being deleted in the DeleteFileSystem operation.

" + }, + "DeleteFileSystemLustreResponse":{ + "type":"structure", + "members":{ + "FinalBackupId":{ + "shape":"BackupId", + "documentation":"

The ID of the final backup for this file system.

" + }, + "FinalBackupTags":{ + "shape":"Tags", + "documentation":"

The set of tags applied to the final backup.

" + } + }, + "documentation":"

The response object for the Amazon FSx for Lustre file system being deleted in the DeleteFileSystem operation.

" + }, "DeleteFileSystemRequest":{ "type":"structure", "required":["FileSystemId"], @@ -1013,10 +1097,11 @@ }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", "idempotencyToken":true }, - "WindowsConfiguration":{"shape":"DeleteFileSystemWindowsConfiguration"} + "WindowsConfiguration":{"shape":"DeleteFileSystemWindowsConfiguration"}, + "LustreConfiguration":{"shape":"DeleteFileSystemLustreConfiguration"} }, "documentation":"

The request object for DeleteFileSystem operation.

" }, @@ -1031,7 +1116,8 @@ "shape":"FileSystemLifecycle", "documentation":"

The file system lifecycle for the deletion request. Should be DELETING.

" }, - "WindowsResponse":{"shape":"DeleteFileSystemWindowsResponse"} + "WindowsResponse":{"shape":"DeleteFileSystemWindowsResponse"}, + "LustreResponse":{"shape":"DeleteFileSystemLustreResponse"} }, "documentation":"

The response object for the DeleteFileSystem operation.

" }, @@ -1068,19 +1154,19 @@ "members":{ "BackupIds":{ "shape":"BackupIds", - "documentation":"

(Optional) IDs of the backups you want to retrieve (String). This overrides any filters. If any IDs are not found, BackupNotFound will be thrown.

" + "documentation":"

IDs of the backups you want to retrieve (String). This overrides any filters. If any IDs are not found, BackupNotFound will be thrown.

" }, "Filters":{ "shape":"Filters", - "documentation":"

(Optional) Filters structure. Supported names are file-system-id and backup-type.

" + "documentation":"

Filters structure. Supported names are file-system-id and backup-type.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Maximum number of backups to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" + "documentation":"

Maximum number of backups to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

(Optional) Opaque pagination token returned from a previous DescribeBackups operation (String). If a token present, the action continues the list from where the returning call left off.

" + "documentation":"

Opaque pagination token returned from a previous DescribeBackups operation (String). If a token is present, the action continues the list from where the returning call left off.

" } }, "documentation":"

The request object for DescribeBackups operation.

" @@ -1129,15 +1215,15 @@ "members":{ "FileSystemIds":{ "shape":"FileSystemIds", - "documentation":"

(Optional) IDs of the file systems whose descriptions you want to retrieve (String).

" + "documentation":"

IDs of the file systems whose descriptions you want to retrieve (String).

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" + "documentation":"

Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

(Optional) Opaque pagination token returned from a previous DescribeFileSystems operation (String). If a token present, the action continues the list from where the returning call left off.

" + "documentation":"

Opaque pagination token returned from a previous DescribeFileSystems operation (String). If a token is present, the action continues the list from where the returning call left off.

" } }, "documentation":"

The request object for DescribeFileSystems operation.

" @@ -1173,7 +1259,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^.{1,256}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$" }, "DnsIps":{ "type":"list", @@ -1253,7 +1339,11 @@ "shape":"WindowsFileSystemConfiguration", "documentation":"

The configuration for this Microsoft Windows file system.

" }, - "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"} + "LustreConfiguration":{"shape":"LustreFileSystemConfiguration"}, + "AdministrativeActions":{ + "shape":"AdministrativeActions", + "documentation":"

A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Windows file system that you have initiated using the UpdateFileSystem action.

" + } }, "documentation":"

A description of a specific Amazon FSx file system.

" }, @@ -1261,7 +1351,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^.{1,256}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$" }, "FileSystemFailureDetails":{ "type":"structure", @@ -1353,7 +1443,8 @@ "documentation":"

The name for a filter.

", "enum":[ "file-system-id", - "backup-type" + "backup-type", + "file-system-type" ] }, "FilterValue":{ @@ -1456,11 +1547,11 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

(Optional) Maximum number of tags to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" + "documentation":"

Maximum number of tags to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

(Optional) Opaque pagination token returned from a previous ListTagsForResource operation (String). If a token present, the action continues the list from where the returning call left off.

" + "documentation":"

Opaque pagination token returned from a previous ListTagsForResource operation (String). If a token is present, the action continues the list from where the returning call left off.

" } }, "documentation":"

The request object for ListTagsForResource operation.

" @@ -1492,12 +1583,12 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The UTC time that you want to begin your weekly maintenance window.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

The deployment type of the FSX for Lustre file system.

" + "documentation":"

The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

The PERSISTENT_1 deployment type is used for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options. (Default = SCRATCH_1)

" }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", @@ -1506,6 +1597,12 @@ "MountName":{ "shape":"LustreFileSystemMountName", "documentation":"

You use the MountName value when mounting the file system.

For the SCRATCH_1 deployment type, this value is always \"fsx\". For SCRATCH_2 and PERSISTENT_1 deployment types, this value is a string that is unique within an AWS Region.

" + }, + "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, + "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, + "CopyTagsToBackups":{ + "shape":"Flag", + "documentation":"

A boolean flag indicating whether tags on the file system should be copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false)

" } }, "documentation":"

The configuration for the Amazon FSx for Lustre file system.

" @@ -1519,6 +1616,7 @@ "MaxResults":{ "type":"integer", "documentation":"

The maximum number of resources to return in the response. This value must be an integer greater than zero.

", + "max":2147483647, "min":1 }, "Megabytes":{ @@ -1577,7 +1675,7 @@ "type":"string", "max":2000, "min":1, - "pattern":"^.{1,2000}$" + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,2000}$" }, "Parameter":{ "type":"string", @@ -1603,6 +1701,7 @@ "type":"string", "enum":["FAILED_FILES_ONLY"] }, + "RequestTime":{"type":"timestamp"}, "ResourceARN":{ "type":"string", "documentation":"

The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify AWS resources. We require an ARN when you need to specify a resource unambiguously across all of AWS. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", @@ -1727,7 +1826,7 @@ "documentation":"

A list of up to two IP addresses of DNS servers or domain controllers in the self-managed AD directory.

" } }, - "documentation":"

The configuration that Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft Active Directory (AD) directory.

" + "documentation":"

The configuration that Amazon FSx uses to join the Windows File Server instance to a self-managed Microsoft Active Directory (AD) directory.

" }, "ServiceLimit":{ "type":"string", @@ -1753,9 +1852,20 @@ "exception":true }, "StartTime":{"type":"timestamp"}, + "Status":{ + "type":"string", + "enum":[ + "FAILED", + "IN_PROGRESS", + "PENDING", + "COMPLETED", + "UPDATED_OPTIMIZING" + ] + }, "StorageCapacity":{ "type":"integer", "documentation":"

The storage capacity for your Amazon FSx file system, in gibibytes.

", + "max":2147483647, "min":0 }, "StorageType":{ @@ -1895,8 +2005,10 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" - } + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" + }, + "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, + "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"} }, "documentation":"

The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

" }, @@ -1904,15 +2016,22 @@ "type":"structure", "required":["FileSystemId"], "members":{ - "FileSystemId":{"shape":"FileSystemId"}, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Identifies the file system that you are updating.

" + }, "ClientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", + "documentation":"

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "idempotencyToken":true }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server file system. Specifies the storage capacity target value, in GiB, for the file system you're updating. The storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system needs to have at least 16 MB/s of throughput capacity. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For more information, see Managing Storage Capacity.

" + }, "WindowsConfiguration":{ "shape":"UpdateFileSystemWindowsConfiguration", - "documentation":"

The configuration update for this Microsoft Windows file system. The only supported options are for backup and maintenance and for self-managed Active Directory configuration.

" + "documentation":"

The configuration updates for an Amazon FSx for Windows File Server file system.

" }, "LustreConfiguration":{"shape":"UpdateFileSystemLustreConfiguration"} }, @@ -1933,22 +2052,26 @@ "members":{ "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. Where d is the weekday number, from 1 through 7, with 1 = Monday and 7 = Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", - "documentation":"

The preferred time to take daily automatic backups, in the UTC time zone.

" + "documentation":"

The preferred time to start the daily automatic backup, in the UTC time zone, for example, 02:00

" }, "AutomaticBackupRetentionDays":{ "shape":"AutomaticBackupRetentionDays", - "documentation":"

The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 35 days.

" + "documentation":"

The number of days to retain automatic daily backups. Setting this to zero (0) disables automatic daily backups. You can retain automatic daily backups for a maximum of 35 days. For more information, see Working with Automatic Daily Backups.

" + }, + "ThroughputCapacity":{ + "shape":"MegabytesPerSecond", + "documentation":"

Sets the target value for a file system's throughput capacity, in MB/s, that you are updating the file system to. Valid values are 8, 16, 32, 64, 128, 256, 512, 1024, 2048. You cannot make a throughput capacity update request if there is an existing throughput capacity update request in progress. For more information, see Managing Throughput Capacity.

" }, "SelfManagedActiveDirectoryConfiguration":{ "shape":"SelfManagedActiveDirectoryConfigurationUpdates", - "documentation":"

The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory.

" + "documentation":"

The configuration Amazon FSx uses to join the Windows File Server instance to the self-managed Microsoft AD directory. You cannot make a self-managed Microsoft AD update request if there is an existing self-managed Microsoft AD update request in progress.

" } }, - "documentation":"

Updates the Microsoft Windows configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx overwrites existing properties with non-null values provided in the request. If you don't specify a non-null value for a property, that property is not updated.

" + "documentation":"

Updates the configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx only overwrites existing properties with non-null values provided in the request.

" }, "VpcId":{ "type":"string", @@ -2006,7 +2129,7 @@ }, "WeeklyMaintenanceStartTime":{ "shape":"WeeklyTime", - "documentation":"

The preferred time to perform weekly maintenance, in the UTC time zone.

" + "documentation":"

The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

" }, "DailyAutomaticBackupStartTime":{ "shape":"DailyTime", diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index a407f9c97e38..0e9bc1f4bc5d 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index 1f0ff2d45fc3..bf9e17b76a2e 100755 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -28,6 +28,24 @@ ], "documentation":"

Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match, the ticket status is returned to SEARCHING to find a new match. For tickets where one or more players failed to respond, the ticket status is set to CANCELLED, and processing is terminated. A new matchmaking request for these players can be submitted as needed.

Learn more

Add FlexMatch to a Game Client

FlexMatch Events Reference

Related operations

" }, + "ClaimGameServer":{ + "name":"ClaimGameServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ClaimGameServerInput"}, + "output":{"shape":"ClaimGameServerOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"OutOfCapacityException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Locates an available game server and temporarily reserves it to host gameplay and players. This action is called by a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, GameLift FleetIQ searches for an available game server in the specified game server group, places the game server in \"claimed\" status for 60 seconds, and returns connection information back to the requester so that players can connect to the game server.

There are two ways you can claim a game server. For the first option, you provide a game server group ID only, which prompts GameLift FleetIQ to search for an available game server in the specified group and claim it. With this option, GameLift FleetIQ attempts to consolidate gameplay on as few instances as possible to minimize hosting costs. For the second option, you request a specific game server by its ID. This option results in a less efficient claiming process because it does not take advantage of consolidation and may fail if the requested game server is unavailable.

To claim a game server, identify a game server group and (optionally) a game server ID. If your game requires that game data be provided to the game server at the start of a game, such as a game map or player information, you can provide it in your claim request.

When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE, while the claim status is set to CLAIMED for up to 60 seconds. This time period allows the game server to be prompted to update its status to UTILIZED (using UpdateGameServer). If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request.

If you try to claim a specific game server, this request will fail in the following cases: (1) if the game server utilization status is UTILIZED, (2) if the game server claim status is CLAIMED, or (3) if the instance that the game server is running on is flagged as draining.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, "CreateAlias":{ "name":"CreateAlias", "http":{ @@ -61,7 +79,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates a new Amazon GameLift build record for your game server binary files and points to the location of your game server build files in an Amazon Simple Storage Service (Amazon S3) location.

Game server binaries must be combined into a zip file for use with Amazon GameLift.

To create new builds directly from a file directory, use the AWS CLI command upload-build . This helper command uploads build files and creates a new build record in one step, and automatically handles the necessary permissions.

The CreateBuild operation should be used only in the following scenarios:

  • To create a new game build with build files that are in an Amazon S3 bucket under your own AWS account. To use this option, you must first give Amazon GameLift access to that Amazon S3 bucket. Then call CreateBuild and specify a build name, operating system, and the Amazon S3 storage location of your game build.

  • To upload build files directly to Amazon GameLift's Amazon S3 account. To use this option, first call CreateBuild and specify a build name and operating system. This action creates a new build record and returns an Amazon S3 storage location (bucket and key only) and temporary access credentials. Use the credentials to manually upload your build file to the provided storage location (see the Amazon S3 topic Uploading Objects). You can upload build files to the GameLift Amazon S3 location only once.

If successful, this operation creates a new build record with a unique build ID and places it in INITIALIZED status. You can use DescribeBuild to check the status of your build. A build must be in READY status before it can be used to create fleets.

Learn more

Uploading Your Game https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html

Create a Build with Files in Amazon S3

Related operations

" + "documentation":"

Creates a new Amazon GameLift build resource for your game server binary files. Game server binaries must be combined into a zip file for use with Amazon GameLift.

When setting up a new game build for GameLift, we recommend using the AWS CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to a GameLift Amazon S3 location, and (2) it creates a new build resource.

The CreateBuild operation can be used in the following scenarios:

  • To create a new game build with build files that are in an S3 location under an AWS account that you control. To use this option, you must first give Amazon GameLift access to the S3 bucket. With permissions in place, call CreateBuild and specify a build name, operating system, and the S3 storage location of your game build.

  • To directly upload your build files to a GameLift S3 location. To use this option, first call CreateBuild and specify a build name and operating system. This action creates a new build resource and also returns an S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. Build files can be uploaded to the GameLift S3 location once only; they can't be updated.

If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

Learn more

Uploading Your Game

Create a Build with Files in Amazon S3

Related operations

" }, "CreateFleet":{ "name":"CreateFleet", @@ -80,7 +98,24 @@ {"shape":"UnauthorizedException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Creates a new fleet to run your game servers. whether they are custom game builds or Realtime Servers with game-specific script. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can host multiple game sessions. When creating a fleet, you choose the hardware specifications, set some configuration options, and specify the game server to deploy on the new fleet.

To create a new fleet, you must provide the following: (1) a fleet name, (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build ID for your game build or script ID if using Realtime Servers, and (4) a runtime configuration, which determines how game servers will run on each instance in the fleet.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet record. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

  • Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build or Realtime script to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each process launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Learn more

Setting Up Fleets

Debug Fleet Creation Issues

Related operations

" + "documentation":"

Creates a new fleet to run your game servers, whether they are custom game builds or Realtime Servers with game-specific scripts. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can host multiple game sessions. When creating a fleet, you choose the hardware specifications, set some configuration options, and specify the game server to deploy on the new fleet.

To create a new fleet, provide the following: (1) a fleet name, (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build ID for your game build or script ID if using Realtime Servers, and (4) a runtime configuration, which determines how game servers will run on each instance in the fleet.

If the CreateFleet call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:

  • Creates a fleet resource. Status: NEW.

  • Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.

  • Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.

  • Downloads the game build or Realtime script to the new instance and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING.

  • Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each process launch by a few seconds. Status: ACTIVATING.

  • Sets the fleet's status to ACTIVE as soon as one server process is ready to host a game session.

Learn more

Setting Up Fleets

Debug Fleet Creation Issues

Related operations

" + }, + "CreateGameServerGroup":{ + "name":"CreateGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGameServerGroupInput"}, + "output":{"shape":"CreateGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Creates a GameLift FleetIQ game server group to manage a collection of EC2 instances for game hosting. In addition to creating the game server group, this action also creates an Auto Scaling group in your AWS account and establishes a link between the two groups. You have full control over configuration of the Auto Scaling group, but GameLift FleetIQ routinely updates certain Auto Scaling group properties in order to optimize the group's instances for low-cost game hosting. You can view the status of your game server groups in the GameLift Console. Game server group metrics and events are emitted to Amazon CloudWatch.

Prior to creating a new game server group, you must set up the following:

  • An EC2 launch template. The template provides configuration settings for a set of EC2 instances and includes the game server build that you want to deploy and run on each instance. For more information on creating a launch template, see Launching an Instance from a Launch Template in the Amazon EC2 User Guide.

  • An IAM role. The role sets up limited access to your AWS account, allowing GameLift FleetIQ to create and manage the EC2 Auto Scaling group, get instance data, and emit metrics and events to CloudWatch. For more information on setting up an IAM permissions policy with principal access for GameLift, see Specifying a Principal in a Policy in the Amazon S3 Developer Guide.

To create a new game server group, provide a name and specify the IAM role and EC2 launch template. You also need to provide a list of instance types to be used in the group and set initial maximum and minimum limits on the group's instance count. You can optionally set an autoscaling policy with target tracking based on a GameLift FleetIQ metric.

Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Keep in mind, however, that some properties are periodically updated by GameLift FleetIQ as it balances the group's instances based on availability and cost.

Learn more

GameLift FleetIQ Guide

Updating a GameLift FleetIQ-Linked Auto Scaling Group

Related operations

" }, "CreateGameSession":{ "name":"CreateGameSession", @@ -119,7 +154,7 @@ {"shape":"LimitExceededException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

" + "documentation":"

Establishes a new queue for processing requests to place new game sessions. A queue identifies where new game sessions can be hosted -- by specifying a list of destinations (fleets or aliases) -- and how long requests can wait in the queue before timing out. You can set up a queue to try to place game sessions on fleets in multiple Regions. To add placement requests to a queue, call StartGameSessionPlacement and reference the queue name.

Destination order. When processing a request for a game session, Amazon GameLift tries each destination in order until it finds one with available resources to host the new game session. A queue's default order is determined by how destinations are listed. The default order is overridden when a game session placement request provides player latency information. Player latency information enables Amazon GameLift to prioritize destinations where players report the lowest average latency, as a result placing the new game session where the majority of players will have the best possible gameplay experience.

Player latency policies. For placement requests containing player latency information, use player latency policies to protect individual players from very high latencies. With a latency cap, even when a destination can deliver a low latency for most players, the game is not placed where any individual player is reporting latency higher than a policy's maximum. A queue can have multiple latency policies, which are enforced consecutively starting with the policy with the lowest latency cap. Use multiple policies to gradually relax latency controls; for example, you might set a policy with a low latency cap for the first 60 seconds, a second policy with a higher cap for the next 60 seconds, etc.

To create a new queue, provide a name, timeout value, a list of destinations and, if desired, a set of latency policies. If successful, a new queue object is returned.

Learn more

Design a Game Session Queue

Create a Game Session Queue

Related operations

" }, "CreateMatchmakingConfiguration":{ "name":"CreateMatchmakingConfiguration", @@ -272,7 +307,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes a build. This action permanently deletes the build record and any uploaded build files.

To delete a build, specify its ID. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

Learn more

Working with Builds

Related operations

" + "documentation":"

Deletes a build. This action permanently deletes the build resource and any uploaded build files. Deleting a build does not affect the status of any active fleets using the build, but you can no longer create new fleets with the deleted build.

To delete a build, specify the build ID.

Learn more

Upload a Custom Server Build

Related operations

" }, "DeleteFleet":{ "name":"DeleteFleet", @@ -289,7 +324,23 @@ {"shape":"InvalidRequestException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet's resources and the fleet record. Once a fleet is deleted, you can no longer use that fleet.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Deletes everything related to a fleet. Before deleting a fleet, you must set the fleet's desired capacity to zero. See UpdateFleetCapacity.

If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You do not need to explicitly delete the VPC peering connection--this is done as part of the delete fleet process.

This action removes the fleet and its resources. Once a fleet is deleted, you can no longer use any of the resources in that fleet.

Learn more

Setting up GameLift Fleets

Related operations

" + }, + "DeleteGameServerGroup":{ + "name":"DeleteGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGameServerGroupInput"}, + "output":{"shape":"DeleteGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete action selected, this action may affect three types of resources: the game server group, the corresponding Auto Scaling group, and all game servers currently running in the group.

To delete a game server group, identify the game server group to delete and specify the type of delete action to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

If the delete request is successful, a series of actions are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops autoscaling activity. Once all game servers in the game server group are de-registered, GameLift FleetIQ can begin deleting resources. If any of the delete actions fail, the game server group is placed in ERROR status.

GameLift FleetIQ emits delete events to Amazon CloudWatch.

Learn more

GameLift FleetIQ Guide

Related operations

" }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -306,7 +357,7 @@ {"shape":"UnauthorizedException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

" + "documentation":"

Deletes a game session queue. This action means that any StartGameSessionPlacement requests that reference this queue will fail. To delete a queue, specify the queue name.

Learn more

Using Multi-Region Queues

Related operations

" }, "DeleteMatchmakingConfiguration":{ "name":"DeleteMatchmakingConfiguration", @@ -405,6 +456,21 @@ ], "documentation":"

Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete. You can check for an authorization by calling DescribeVpcPeeringAuthorizations or request a new one using CreateVpcPeeringAuthorization.

Once a valid authorization exists, call this operation from the AWS account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

" }, + "DeregisterGameServer":{ + "name":"DeregisterGameServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterGameServerInput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Removes the game server resource from the game server group. As a result of this action, the de-registered game server can no longer be claimed and will not be returned in a list of active game servers.

To de-register a game server, specify the game server group and game server ID. If successful, this action emits a CloudWatch event with termination time stamp and reason.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, "DescribeAlias":{ "name":"DescribeAlias", "http":{ @@ -435,7 +501,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves properties for a build. To request a build record, specify a build ID. If successful, an object containing the build properties is returned.

Learn more

Working with Builds

Related operations

" + "documentation":"

Retrieves properties for a custom game build. To request a build resource, specify a build ID. If successful, an object containing the build properties is returned.

Learn more

Upload a Custom Server Build

Related operations

" }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -450,7 +516,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • maximum number of instances allowed per AWS account (service limit)

  • current usage level for the AWS account

Service limits vary depending on Region. Available Regions for Amazon GameLift can be found in the AWS Management Console for Amazon GameLift (see the drop-down list in the upper right corner).

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the following information for the specified EC2 instance type:

  • Maximum number of instances allowed per AWS account (service limit).

  • Current usage for the AWS account.

To learn more about the capabilities of each instance type, see Amazon EC2 Instance Types. Note that the instance types offered may vary depending on the region.

Learn more

Setting up GameLift Fleets

Related operations

" }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -466,7 +532,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves fleet properties, including metadata, status, and configuration, for one or more fleets. You can request attributes for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves core properties, including configuration, status, and metadata, for a fleet.

To get attributes for one or more fleets, provide a list of fleet IDs or fleet ARNs. To get attributes for all fleets, do not specify a fleet identifier. When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed number.

Learn more

Setting up GameLift Fleets

Related operations

" }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -482,7 +548,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the current status of fleet capacity for one or more fleets. This information includes the number of instances that have been requested for the fleet and the number currently active. You can request capacity for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When specifying a list of fleet IDs, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves the current capacity statistics for one or more fleets. These statistics present a snapshot of the fleet's instances and provide insight on current or imminent scaling activity. To get statistics on game hosting activity in the fleet, see DescribeFleetUtilization.

You can request capacity for all fleets or specify a list of one or more fleet identifiers. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. When a list of fleet IDs is provided, attribute objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift Fleets

GameLift Metrics for Fleets

Related operations

" }, "DescribeFleetEvents":{ "name":"DescribeFleetEvents", @@ -498,7 +564,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves entries from the specified fleet's event log. You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a collection of event log entries matching the request are returned.

Learn more

Setting up GameLift Fleets

Related operations

" }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -514,7 +580,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the inbound connection permissions for a fleet. Connection permissions include a range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. To get a fleet's inbound connection permissions, specify a fleet ID. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves a fleet's inbound connection permissions. Connection permissions specify the range of IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game sessions that are running on instances in the fleet use connections that fall in this range.

To get a fleet's inbound connection permissions, specify the fleet's unique identifier. If successful, a collection of IpPermission objects is returned for the requested fleet ID. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift Fleets

Related operations

" }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -530,7 +596,39 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves utilization statistics for one or more fleets. You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID. When specifying a list of fleet IDs, utilization objects are returned only for fleets that currently exist.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves utilization statistics for one or more fleets. These statistics provide insight into how available hosting resources are currently being used. To get statistics on available hosting resources, see DescribeFleetCapacity.

You can request utilization data for all fleets, or specify a list of one or more fleet IDs. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetUtilization object is returned for each requested fleet ID, unless the fleet identifier is not found.

Some API actions may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up GameLift Fleets

GameLift Metrics for Fleets

Related operations

" + }, + "DescribeGameServer":{ + "name":"DescribeGameServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGameServerInput"}, + "output":{"shape":"DescribeGameServerOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Retrieves information for a game server resource. Information includes the game server statuses, health check info, and the instance the game server is running on.

To retrieve game server information, specify the game server ID. If successful, the requested game server object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, + "DescribeGameServerGroup":{ + "name":"DescribeGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGameServerGroupInput"}, + "output":{"shape":"DescribeGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Retrieves information on a game server group.

To get attributes for a game server group, provide a group name or ARN value. If successful, a GameServerGroup object is returned.

Learn more

GameLift FleetIQ Guide

Related operations

" }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -579,7 +677,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

" + "documentation":"

Retrieves the properties for one or more game session queues. When requesting multiple queues, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSessionQueue object is returned for each requested queue. When specifying a list of queues, objects are returned only for queues that currently exist in the Region.

Learn more

View Your Queues

Related operations

" }, "DescribeGameSessions":{ "name":"DescribeGameSessions", @@ -612,7 +710,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves information about a fleet's instances, including instance IDs. Use this action to get details on all instances in the fleet or get details on one specific instance.

To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.

" + "documentation":"

Retrieves information about a fleet's instances, including instance IDs. Use this action to get details on all instances in the fleet or get details on one specific instance.

To get a specific instance, specify fleet ID and instance ID. To get all instances in a fleet, specify a fleet ID only. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, an Instance object is returned for each result.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related operations

" }, "DescribeMatchmaking":{ "name":"DescribeMatchmaking", @@ -690,7 +788,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the current runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift how to launch server processes on instances in the fleet.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Retrieves a fleet's runtime configuration settings. The runtime configuration tells Amazon GameLift which server processes to run (and how) on each instance in the fleet.

To get a runtime configuration, specify the fleet's unique identifier. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up GameLift Fleets

Running Multiple Processes on a Fleet

Related operations

" }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -785,7 +883,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or watching activity in real time.

Access requires credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request. (See the example later in this topic). For more information on remote access, see Remotely Accessing an Instance.

To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances. If successful, an InstanceAccess object is returned containing the instance's IP address and a set of credentials.

" + "documentation":"

Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the AWS CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this action.

To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances. If successful, an InstanceAccess object is returned that contains the instance's IP address and a set of credentials.

Learn more

Remotely Access Fleet Instances

Debug Fleet Issues

Related operations

" }, "ListAliases":{ "name":"ListAliases", @@ -815,7 +913,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves build records for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build records are not listed in any particular order.

Learn more

Working with Builds

Related operations

" + "documentation":"

Retrieves build resources for all builds associated with the AWS account in use. You can limit results to builds that are in a specific status by using the Status parameter. Use the pagination parameters to retrieve results in a set of sequential pages.

Build resources are not listed in any particular order.

Learn more

Upload a Custom Server Build

Related operations

" }, "ListFleets":{ "name":"ListFleets", @@ -831,7 +929,37 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Retrieves a collection of fleet records for this AWS account. You can filter the result set to find only those fleets that are deployed with a specific build or script. Use the pagination parameters to retrieve results in sequential pages.

Fleet records are not listed in a particular order.

Learn more

Set Up Fleets.

Related operations

" + "documentation":"

Retrieves a collection of fleet resources for this AWS account. You can filter the result set to find only those fleets that are deployed with a specific build or script. Use the pagination parameters to retrieve results in sequential pages.

Fleet resources are not listed in a particular order.

Learn more

Setting up GameLift Fleets

Related operations

" + }, + "ListGameServerGroups":{ + "name":"ListGameServerGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGameServerGroupsInput"}, + "output":{"shape":"ListGameServerGroupsOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Retrieves information on all game server groups that exist in the current AWS account for the selected region. Use the pagination parameters to retrieve results in a set of sequential pages.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, + "ListGameServers":{ + "name":"ListGameServers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGameServersInput"}, + "output":{"shape":"ListGameServersOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Retrieves information on all game servers that are currently running in a specified game server group. If there are custom key sort values for your game servers, you can opt to have the returned list sorted based on these values. Use the pagination parameters to retrieve results in a set of sequential pages.

Learn more

GameLift FleetIQ Guide

Related operations

" }, "ListScripts":{ "name":"ListScripts", @@ -880,6 +1008,23 @@ ], "documentation":"

Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

You can temporarily suspend all scaling policies for a fleet by calling StopFleetActions with the fleet action AUTO_SCALING. To resume scaling policies, call StartFleetActions with the same fleet action. To stop just one scaling policy--or to permanently remove it--you must delete the policy with DeleteScalingPolicy.

Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

Target-based policy

A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

Rule-based policy

A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

A policy's rule statement has the following structure:

If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

To implement the example, the rule statement would look like this:

If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

" }, + "RegisterGameServer":{ + "name":"RegisterGameServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterGameServerInput"}, + "output":{"shape":"RegisterGameServerOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Creates a new game server resource and notifies GameLift FleetIQ that the game server is ready to host gameplay and players. This action is called by a game server process that is running on an instance in a game server group. Registering game servers enables GameLift FleetIQ to track available game servers and enables game clients and services to claim a game server for a new game session.

To register a game server, identify the game server group and instance where the game server is running, and provide a unique identifier for the game server. You can also include connection and game server data; when a game client or service requests a game server by calling ClaimGameServer, this information is returned in response.

Once a game server is successfully registered, it is put in status AVAILABLE. A request to register a game server may fail if the instance it is running on is in the process of shutting down as part of instance rebalancing or scale-down activity.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, "RequestUploadCredentials":{ "name":"RequestUploadCredentials", "http":{ @@ -894,7 +1039,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

Learn more

Uploading Your Game

Related operations

" + "documentation":"

Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

Learn more

Create a Build with Files in S3

Related operations

" }, "ResolveAlias":{ "name":"ResolveAlias", @@ -913,6 +1058,22 @@ ], "documentation":"

Retrieves the fleet ID that an alias is currently pointing to.

" }, + "ResumeGameServerGroup":{ + "name":"ResumeGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResumeGameServerGroupInput"}, + "output":{"shape":"ResumeGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Reinstates activity on a game server group after it has been suspended. A game server group may be suspended by calling SuspendGameServerGroup, or it may have been involuntarily suspended due to a configuration problem. You can manually resume activity on the group once the configuration problem has been resolved. Refer to the game server group status and status reason for more information on why group activity is suspended.

To resume activity, specify a game server group ARN and the type of activity to be resumed.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, "SearchGameSessions":{ "name":"SearchGameSessions", "http":{ @@ -944,7 +1105,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Resumes activity on a fleet that was suspended with StopFleetActions. Currently, this operation is used to restart a fleet's auto-scaling activity.

To start fleet actions, specify the fleet ID and the type of actions to restart. When auto-scaling fleet actions are restarted, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet were never stopped, this operation will have no effect. You can view a fleet's stopped actions using DescribeFleetAttributes.

Learn more

Setting up GameLift Fleets

Related operations

" }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -1008,7 +1169,7 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop scaling events triggered by the fleet's scaling policies. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.

To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except to maintain the fleet's desired instances setting (FleetCapacity. Changes to the fleet's capacity must be done manually using UpdateFleetCapacity.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Suspends activity on a fleet. Currently, this operation is used to stop a fleet's auto-scaling activity. It is used to temporarily stop triggering scaling events. The policies can be retained and auto-scaling activity can be restarted using StartFleetActions. You can view a fleet's stopped actions using DescribeFleetAttributes.

To stop fleet actions, specify the fleet ID and the type of actions to suspend. When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity.

Learn more

Setting up GameLift Fleets

Related operations

" }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", @@ -1042,6 +1203,22 @@ ], "documentation":"

Cancels a matchmaking ticket or match backfill ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED.

This call is also used to turn off automatic backfill for an individual game session. This is for game sessions that are created with a matchmaking configuration that has automatic backfill enabled. The ticket ID is included in the MatchmakerData of an updated game session object, which is provided to the game server.

If the action is successful, the service sends back an empty JSON struct with the HTTP 200 response (not an empty HTTP body).

Learn more

Add FlexMatch to a Game Client

Related operations

" }, + "SuspendGameServerGroup":{ + "name":"SuspendGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SuspendGameServerGroupInput"}, + "output":{"shape":"SuspendGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Temporarily stops activity on a game server group without terminating instances or the game server group. Activity can be restarted by calling ResumeGameServerGroup. Activities that can be suspended are:

  • Instance type replacement. This activity evaluates the current Spot viability of all instance types that are defined for the game server group. It updates the Auto Scaling group to remove nonviable Spot instance types (which have a higher chance of game server interruptions) and rebalances capacity across the remaining viable Spot instance types. When this activity is suspended, the Auto Scaling group continues with its current balance, regardless of viability. Instance protection, utilization metrics, and capacity autoscaling activities continue to be active.

To suspend activity, specify a game server group ARN and the type of activity to be suspended.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -1056,7 +1233,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This action handles the permissions necessary to manage tags for the following GameLift resource types:

  • Build

  • Script

  • Fleet

  • Alias

  • GameSessionQueue

  • MatchmakingConfiguration

  • MatchmakingRuleSet

To add a tag to a resource, specify the unique ARN value for the resource and provide a trig list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" + "documentation":"

Assigns a tag to a GameLift resource. AWS resource tags provide an additional management tool set. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize AWS cost breakdowns, etc. This action handles the permissions necessary to manage tags for the following GameLift resource types:

  • Build

  • Script

  • Fleet

  • Alias

  • GameSessionQueue

  • MatchmakingConfiguration

  • MatchmakingRuleSet

To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the specified resource.

Learn more

Tagging AWS Resources in the AWS General Reference

AWS Tagging Strategies

Related operations

" }, "UntagResource":{ "name":"UntagResource", @@ -1104,7 +1281,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates metadata in a build record, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

Learn more

Working with Builds

Related operations

" + "documentation":"

Updates metadata in a build resource, including the build name and version. To update the metadata, specify the build ID to update and provide the new values. If successful, a build object containing the updated metadata is returned.

Learn more

Upload a Custom Server Build

Related operations

" }, "UpdateFleetAttributes":{ "name":"UpdateFleetAttributes", @@ -1123,7 +1300,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates fleet properties, including name and description, for a fleet. To update metadata, specify the fleet ID and the property values that you want to change. If successful, the fleet ID for the updated fleet is returned.

Learn more

Setting up GameLift Fleets

Related operations

" }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -1142,7 +1319,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates capacity settings for a fleet. Use this action to specify the number of EC2 instances (hosts) that you want this fleet to contain. Before calling this action, you may want to call DescribeEC2InstanceLimits to get the maximum capacity based on the fleet's EC2 instance type.

Specify minimum and maximum number of instances. Amazon GameLift will not change fleet capacity to values that fall outside of this range. This is particularly important when using auto-scaling (see PutScalingPolicy) to allow capacity to adjust based on player demand while imposing limits on automatic adjustments.

To update fleet capacity, specify the fleet ID and the number of instances you want the fleet to host. If successful, Amazon GameLift starts or terminates instances so that the fleet's active instance count matches the desired instance count. You can view a fleet's current capacity information by calling DescribeFleetCapacity. If the desired instance count is higher than the instance type's limit, the \"Limit Exceeded\" exception occurs.

Learn more

Setting up GameLift Fleets

Related operations

" }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -1161,7 +1338,39 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates port settings for a fleet. To update settings, specify the fleet ID to be updated and list the permissions you want to update. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.

Learn more

Setting up GameLift Fleets

Related operations

" + }, + "UpdateGameServer":{ + "name":"UpdateGameServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGameServerInput"}, + "output":{"shape":"UpdateGameServerOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Updates information about a registered game server. This action is called by a game server process that is running on an instance in a game server group. There are three reasons to update game server information: (1) to change the utilization status of the game server, (2) to report game server health status, and (3) to change game server metadata. A registered game server should regularly report health and should update utilization status when it is supporting gameplay so that GameLift FleetIQ can accurately track game server availability. You can make all three types of updates in the same request.

  • To update the game server's utilization status, identify the game server and game server group and specify the current utilization status. Use this status to identify when game servers are currently hosting games and when they are available to be claimed.

  • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy and will be eventually de-registered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

  • To change game server metadata, provide updated game server data and custom sort key values.

Once a game server is successfully updated, the relevant statuses and timestamps are updated.

Learn more

GameLift FleetIQ Guide

Related operations

" + }, + "UpdateGameServerGroup":{ + "name":"UpdateGameServerGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGameServerGroupInput"}, + "output":{"shape":"UpdateGameServerGroupOutput"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

This action is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Updates GameLift FleetIQ-specific properties for a game server group. These properties include instance rebalancing and game server protection. Many Auto Scaling group properties are updated directly. These include autoscaling policies, minimum/maximum/desired instance counts, and launch template.

To update the game server group, specify the game server group ID and provide the updated values.

Updated properties are validated to ensure that GameLift FleetIQ can continue to perform its core instance rebalancing activity. When you change Auto Scaling group properties directly and the changes cause errors with GameLift FleetIQ activities, an alert is sent.

Learn more

GameLift FleetIQ Guide

Updating a GameLift FleetIQ-Linked Auto Scaling Group

Related operations

" }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1195,7 +1404,7 @@ {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

" + "documentation":"

Updates settings for a game session queue, which determines how new game session requests in the queue are processed. To update settings, specify the queue name to be updated and provide the new settings. When updating destinations, provide a complete list of destinations.

Learn more

Using Multi-Region Queues

Related operations

" }, "UpdateMatchmakingConfiguration":{ "name":"UpdateMatchmakingConfiguration", @@ -1228,7 +1437,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidFleetStatusException"} ], - "documentation":"

Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in a Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Working with Fleets.

Related operations

" + "documentation":"

Updates the current runtime configuration for the specified fleet, which tells Amazon GameLift how to launch server processes on instances in the fleet. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in an ACTIVE status.

To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration object with an updated set of server process configurations.

Each instance in an Amazon GameLift fleet checks regularly for an updated runtime configuration and changes how it launches server processes to comply with the latest version. Existing server processes are not affected by the update; runtime configuration changes are applied gradually as existing processes shut down and new processes are launched during Amazon GameLift's normal process recycling activity.

Learn more

Setting up GameLift Fleets

Related operations

" }, "UpdateScript":{ "name":"UpdateScript", @@ -1310,8 +1519,8 @@ "documentation":"

A descriptive label that is associated with an alias. Alias names do not need to be unique.

" }, "AliasArn":{ - "shape":"ArnStringModel", - "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions.. In a GameLift alias ARN, the resource ID matches the alias ID value.

" + "shape":"AliasArn", + "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift alias ARN, the resource ID matches the alias ID value.

" }, "Description":{ "shape":"FreeText", @@ -1332,7 +1541,15 @@ }, "documentation":"

Properties that describe an alias resource.

" }, + "AliasArn":{ + "type":"string", + "pattern":"^arn:.*:alias\\/alias-\\S+" + }, "AliasId":{ + "type":"string", + "pattern":"^alias-\\S+" + }, + "AliasIdOrArn":{ "type":"string", "pattern":"^alias-\\S+|^arn:.*:alias\\/alias-\\S+" }, @@ -1373,6 +1590,12 @@ }, "documentation":"

Values for use in Player attribute key-value pairs. This object lets you specify an attribute value using any of the valid data types: string, number, string array, or data map. Each AttributeValue object can use only one of the available properties.

" }, + "AutoScalingGroupArn":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, "AwsCredentials":{ "type":"structure", "members":{ @@ -1399,6 +1622,13 @@ "MANUAL" ] }, + "BalancingStrategy":{ + "type":"string", + "enum":[ + "SPOT_ONLY", + "SPOT_PREFERRED" + ] + }, "BooleanModel":{"type":"boolean"}, "Build":{ "type":"structure", @@ -1443,6 +1673,10 @@ "pattern":"^arn:.*:build\\/build-\\S+" }, "BuildId":{ + "type":"string", + "pattern":"^build-\\S+" + }, + "BuildIdOrArn":{ "type":"string", "pattern":"^build-\\S+|^arn:.*:build\\/build-\\S+" }, @@ -1476,6 +1710,33 @@ "GENERATED" ] }, + "ClaimGameServerInput":{ + "type":"structure", + "required":["GameServerGroupName"], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group. When claiming a specific game server, this is the game server group where the game server is located. When requesting that GameLift FleetIQ locate an available game server, this is the game server group to search on. You can use either the GameServerGroup name or ARN value.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

A custom string that uniquely identifies the game server to claim. If this parameter is left empty, GameLift FleetIQ searches for an available game server in the specified game server group.

" + }, + "GameServerData":{ + "shape":"GameServerData", + "documentation":"

A set of custom game server properties, formatted as a single string value, to be passed to the claimed game server.

" + } + } + }, + "ClaimGameServerOutput":{ + "type":"structure", + "members":{ + "GameServer":{ + "shape":"GameServer", + "documentation":"

Object that describes the newly claimed game server resource.

" + } + } + }, "ComparisonOperatorType":{ "type":"string", "enum":[ @@ -1542,7 +1803,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region.

" + "documentation":"

Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an S3 bucket that you own. The storage location must specify an S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your S3 bucket. The S3 bucket and your new build must be in the same Region.

" }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -1560,11 +1821,11 @@ "members":{ "Build":{ "shape":"Build", - "documentation":"

The newly created build record, including a unique build IDs and status.

" + "documentation":"

The newly created build resource, including a unique build ID and status.

" }, "UploadCredentials":{ "shape":"AwsCredentials", - "documentation":"

This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an Amazon S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

" + "documentation":"

This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

" }, "StorageLocation":{ "shape":"S3Location", @@ -1589,11 +1850,11 @@ "documentation":"

A human-readable description of a fleet.

" }, "BuildId":{ - "shape":"BuildId", + "shape":"BuildIdOrArn", "documentation":"

A unique identifier for a build to be deployed on the new fleet. You can use either the build ID or ARN value. The custom game server build must have been successfully uploaded to Amazon GameLift and be in a READY status. This fleet setting cannot be changed once the fleet is created.

" }, "ScriptId":{ - "shape":"ScriptId", + "shape":"ScriptIdOrArn", "documentation":"

A unique identifier for a Realtime script to be deployed on the new fleet. You can use either the script ID or ARN value. The Realtime script must have been successfully uploaded to Amazon GameLift. This fleet setting cannot be changed once the fleet is created.

" }, "ServerLaunchPath":{ @@ -1669,16 +1930,82 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "CreateGameServerGroupInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "RoleArn", + "MinSize", + "MaxSize", + "LaunchTemplate", + "InstanceDefinitions" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupName", + "documentation":"

An identifier for the new game server group. This value is used to generate unique ARN identifiers for the EC2 Auto Scaling group and the GameLift FleetIQ game server group. The name must be unique per Region per AWS account.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling groups. The submitted role is validated to ensure that it contains the necessary permissions for game server groups.

" + }, + "MinSize":{ + "shape":"WholeNumber", + "documentation":"

The minimum number of instances allowed in the EC2 Auto Scaling group. During autoscaling events, GameLift FleetIQ and EC2 do not scale down the group below this minimum. In production, this value should be set to at least 1.

" + }, + "MaxSize":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of instances allowed in the EC2 Auto Scaling group. During autoscaling events, GameLift FleetIQ and EC2 do not scale up the group above this maximum.

" + }, + "LaunchTemplate":{ + "shape":"LaunchTemplateSpecification", + "documentation":"

The EC2 launch template that contains configuration settings and game server code to be deployed to all instances in the game server group. You can specify the template using either the template name or ID. For help with creating a launch template, see Creating a Launch Template for an Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + }, + "InstanceDefinitions":{ + "shape":"InstanceDefinitions", + "documentation":"

A set of EC2 instance types to use when creating instances in the group. The instance definitions must specify at least two different instance types that are supported by GameLift FleetIQ. For more information on instance types, see EC2 Instance Types in the Amazon EC2 User Guide.

" + }, + "AutoScalingPolicy":{ + "shape":"GameServerGroupAutoScalingPolicy", + "documentation":"

Configuration settings to define a scaling policy for the Auto Scaling group that is optimized for game hosting. The scaling policy uses the metric \"PercentUtilizedGameServers\" to maintain a buffer of idle game servers that can immediately accommodate new games and players. Once the game server and Auto Scaling groups are created, you can update the scaling policy settings directly in Auto Scaling Groups.

" + }, + "BalancingStrategy":{ + "shape":"BalancingStrategy", + "documentation":"

The fallback balancing method to use for the game server group when Spot instances in a Region become unavailable or are not viable for game hosting. Once triggered, this method remains active until Spot instances can once again be used. Method options include:

  • SPOT_ONLY -- If Spot instances are unavailable, the game server group provides no hosting capacity. No new instances are started, and the existing nonviable Spot instances are terminated (once current gameplay ends) and not replaced.

  • SPOT_PREFERRED -- If Spot instances are unavailable, the game server group continues to provide hosting capacity by using On-Demand instances. Existing nonviable Spot instances are terminated (once current gameplay ends) and replaced with new On-Demand instances.

" + }, + "GameServerProtectionPolicy":{ + "shape":"GameServerProtectionPolicy", + "documentation":"

A flag that indicates whether instances in the game server group are protected from early termination. Unprotected instances that have active game servers running may be terminated during a scale-down event, causing players to be dropped from the game. Protected instances cannot be terminated while there are active game servers running. An exception to this is Spot Instances, which may be terminated by AWS regardless of protection status. This property is set to NO_PROTECTION by default.

" + }, + "VpcSubnets":{ + "shape":"VpcSubnets", + "documentation":"

A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all GameLift FleetIQ-supported availability zones are used; this parameter allows you to specify VPCs that you've set up.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new game server group resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" + } + } + }, + "CreateGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

The newly created game server group object, including the new ARN value for the GameLift FleetIQ game server group and the object's status. The EC2 Auto Scaling group ARN is initially null, since the group has not yet been created. This value is added once the game server group status reaches ACTIVE.

" + } + } + }, "CreateGameSessionInput":{ "type":"structure", "required":["MaximumPlayerSessionCount"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to create a game session in. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier for an alias associated with the fleet to create a game session in. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "MaximumPlayerSessionCount":{ @@ -2035,7 +2362,7 @@ "required":["AliasId"], "members":{ "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier of the alias that you want to delete. You can use either the alias ID or ARN value.

" } }, @@ -2046,7 +2373,7 @@ "required":["BuildId"], "members":{ "BuildId":{ - "shape":"BuildId", + "shape":"BuildIdOrArn", "documentation":"

A unique identifier for a build to delete. You can use either the build ID or ARN value.

" } }, @@ -2057,18 +2384,41 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, "documentation":"

Represents the input for a request action.

" }, + "DeleteGameServerGroupInput":{ + "type":"structure", + "required":["GameServerGroupName"], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

The unique identifier of the game server group to delete. Use either the GameServerGroup name or ARN value.

" + }, + "DeleteOption":{ + "shape":"GameServerGroupDeleteOption", + "documentation":"

The type of delete to perform. Options include:

  • SAFE_DELETE – Terminates the game server group and EC2 Auto Scaling group only when it has no game servers that are in IN_USE status.

  • FORCE_DELETE – Terminates the game server group, including all active game servers regardless of their utilization status, and the EC2 Auto Scaling group.

  • RETAIN – Does a safe delete of the game server group but retains the EC2 Auto Scaling group as is.

" + } + } + }, + "DeleteGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

An object that describes the deleted game server group resource, with status updated to DELETE_SCHEDULED.

" + } + } + }, "DeleteGameSessionQueueInput":{ "type":"structure", "required":["Name"], "members":{ "Name":{ - "shape":"GameSessionQueueName", + "shape":"GameSessionQueueNameOrArn", "documentation":"

A descriptive label that is associated with game session queue. Queue names must be unique within each Region. You can use either the queue ID or ARN value.

" } }, @@ -2124,7 +2474,7 @@ "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique.

" }, "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to be deleted. You can use either the fleet ID or ARN value.

" } }, @@ -2135,7 +2485,7 @@ "required":["ScriptId"], "members":{ "ScriptId":{ - "shape":"ScriptId", + "shape":"ScriptIdOrArn", "documentation":"

A unique identifier for a Realtime script to delete. You can use either the script ID or ARN value.

" } } @@ -2186,12 +2536,29 @@ "members":{ } }, + "DeregisterGameServerInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "GameServerId" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group where the game server to be de-registered is running. Use either the GameServerGroup name or ARN value.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

The identifier for the game server to be de-registered.

" + } + } + }, "DescribeAliasInput":{ "type":"structure", "required":["AliasId"], "members":{ "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

The unique identifier for the fleet alias that you want to retrieve. You can use either the alias ID or ARN value.

" } }, @@ -2212,7 +2579,7 @@ "required":["BuildId"], "members":{ "BuildId":{ - "shape":"BuildId", + "shape":"BuildIdOrArn", "documentation":"

A unique identifier for a build to retrieve properties for. You can use either the build ID or ARN value.

" } }, @@ -2252,8 +2619,8 @@ "type":"structure", "members":{ "FleetIds":{ - "shape":"FleetIdList", - "documentation":"

A unique identifier for a fleet(s) to retrieve attributes for. You can use either the fleet ID or ARN value.

" + "shape":"FleetIdOrArnList", + "documentation":"

A list of unique fleet identifiers to retrieve attributes for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter. If the list of fleet identifiers includes fleets that don't currently exist, the request succeeds but no attributes for that fleet are returned.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2271,7 +2638,7 @@ "members":{ "FleetAttributes":{ "shape":"FleetAttributesList", - "documentation":"

A collection of objects containing attribute metadata for each requested fleet ID.

" + "documentation":"

A collection of objects containing attribute metadata for each requested fleet ID. Attribute objects are returned only for fleets that currently exist.

" }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -2284,7 +2651,7 @@ "type":"structure", "members":{ "FleetIds":{ - "shape":"FleetIdList", + "shape":"FleetIdOrArnList", "documentation":"

A unique identifier for a fleet(s) to retrieve capacity information for. You can use either the fleet ID or ARN value.

" }, "Limit":{ @@ -2317,7 +2684,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to get event logs for. You can use either the fleet ID or ARN value.

" }, "StartTime":{ @@ -2358,7 +2725,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to retrieve port settings for. You can use either the fleet ID or ARN value.

" } }, @@ -2378,8 +2745,8 @@ "type":"structure", "members":{ "FleetIds":{ - "shape":"FleetIdList", - "documentation":"

A unique identifier for a fleet(s) to retrieve utilization data for. You can use either the fleet ID or ARN value.

" + "shape":"FleetIdOrArnList", + "documentation":"

A unique identifier for a fleet(s) to retrieve utilization data for. You can use either the fleet ID or ARN value. To retrieve attributes for all current fleets, do not include this parameter. If the list of fleet identifiers includes fleets that don't currently exist, the request succeeds but no attributes for that fleet are returned.

" }, "Limit":{ "shape":"PositiveInteger", @@ -2406,11 +2773,56 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "DescribeGameServerGroupInput":{ + "type":"structure", + "required":["GameServerGroupName"], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

The unique identifier for the game server group being requested. Use either the GameServerGroup name or ARN value.

" + } + } + }, + "DescribeGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

An object that describes the requested game server group resource.

" + } + } + }, + "DescribeGameServerInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "GameServerId" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group where the game server is running. Use either the GameServerGroup name or ARN value.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

The identifier for the game server to be retrieved.

" + } + } + }, + "DescribeGameServerOutput":{ + "type":"structure", + "members":{ + "GameServer":{ + "shape":"GameServer", + "documentation":"

Object that describes the requested game server resource.

" + } + } + }, "DescribeGameSessionDetailsInput":{ "type":"structure", "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to retrieve all game sessions active on the fleet. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ @@ -2418,7 +2830,7 @@ "documentation":"

A unique identifier for the game session to retrieve.

" }, "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" }, "StatusFilter":{ @@ -2475,7 +2887,7 @@ "type":"structure", "members":{ "Names":{ - "shape":"GameSessionQueueNameList", + "shape":"GameSessionQueueNameOrArnList", "documentation":"

A list of queue names to retrieve information for. You can use either the queue ID or ARN value. To request settings for all queues, leave this parameter empty.

" }, "Limit":{ @@ -2507,7 +2919,7 @@ "type":"structure", "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to retrieve all game sessions for. You can use either the fleet ID or ARN value.

" }, "GameSessionId":{ @@ -2515,7 +2927,7 @@ "documentation":"

A unique identifier for the game session to retrieve.

" }, "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier for an alias associated with the fleet to retrieve all game sessions for. You can use either the alias ID or ARN value.

" }, "StatusFilter":{ @@ -2552,7 +2964,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to retrieve instance information for. You can use either the fleet ID or ARN value.

" }, "InstanceId":{ @@ -2723,7 +3135,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to get the runtime configuration for. You can use either the fleet ID or ARN value.

" } }, @@ -2744,7 +3156,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to retrieve scaling policies for. You can use either the fleet ID or ARN value.

" }, "StatusFilter":{ @@ -2781,7 +3193,7 @@ "required":["ScriptId"], "members":{ "ScriptId":{ - "shape":"ScriptId", + "shape":"ScriptIdOrArn", "documentation":"

A unique identifier for a Realtime script to retrieve properties for. You can use either the script ID or ARN value.

" } } @@ -2882,7 +3294,7 @@ "documentation":"

Number of instances in the fleet that are no longer active but haven't yet been terminated.

" } }, - "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" + "documentation":"

Current status of fleet capacity. The number of active instances should match or be in the process of matching the number of desired instances. Pending and terminating counts are non-zero only if fleet capacity is adjusting to an UpdateFleetCapacity request, or if access to resources is temporarily affected.

" }, "EC2InstanceLimit":{ "type":"structure", @@ -2982,7 +3394,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

The type of event being logged.

Fleet creation events (ordered by fleet creation activity):

  • FLEET_CREATED -- A fleet record was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. Learn more at Debug Fleet Creation Issues

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" + "documentation":"

The type of event being logged.

Fleet creation events (ordered by fleet creation activity):

  • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

  • FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING.

  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.

  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING.

  • FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. Learn more at Debug Fleet Creation Issues

  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.

  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

  • FLEET_DELETED -- A request to delete a fleet was initiated.

  • GENERIC_EVENT -- An unspecified event has occurred.

" }, "Message":{ "shape":"NonEmptyString", @@ -3051,6 +3463,10 @@ "max":1, "min":1 }, + "FleetArn":{ + "type":"string", + "pattern":"^arn:.*:fleet\\/fleet-\\S+" + }, "FleetAttributes":{ "type":"structure", "members":{ @@ -3059,7 +3475,7 @@ "documentation":"

A unique identifier for a fleet.

" }, "FleetArn":{ - "shape":"ArnStringModel", + "shape":"FleetArn", "documentation":"

The Amazon Resource Name (ARN) that is assigned to a GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift fleet ARN, the resource ID matches the FleetId value.

" }, "FleetType":{ @@ -3147,7 +3563,7 @@ "documentation":"

Indicates whether a TLS/SSL certificate was generated for the fleet.

" } }, - "documentation":"

General properties describing a fleet.

" + "documentation":"

General properties describing a fleet.

" }, "FleetAttributesList":{ "type":"list", @@ -3169,7 +3585,7 @@ "documentation":"

Current status of fleet capacity.

" } }, - "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" + "documentation":"

Information about the fleet's capacity. Fleet capacity is measured in EC2 instances. By default, new fleets have a capacity of one instance, but can be updated as needed. The maximum number of instances for a fleet is determined by the fleet's instance type.

" }, "FleetCapacityExceededException":{ "type":"structure", @@ -3185,13 +3601,22 @@ }, "FleetId":{ "type":"string", - "pattern":"^fleet-\\S+|^arn:.*:fleet\\/fleet-\\S+" + "pattern":"^fleet-\\S+" }, "FleetIdList":{ "type":"list", "member":{"shape":"FleetId"}, "min":1 }, + "FleetIdOrArn":{ + "type":"string", + "pattern":"^fleet-\\S+|^arn:.*:fleet\\/fleet-\\S+" + }, + "FleetIdOrArnList":{ + "type":"list", + "member":{"shape":"FleetIdOrArn"}, + "min":1 + }, "FleetStatus":{ "type":"string", "enum":[ @@ -3237,7 +3662,7 @@ "documentation":"

The maximum number of players allowed across all game sessions currently being hosted on all instances in the fleet.

" } }, - "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" + "documentation":"

Current status of fleet utilization, including the number of game and player sessions being hosted.

" }, "FleetUtilizationList":{ "type":"list", @@ -3263,18 +3688,294 @@ }, "documentation":"

Set of key-value pairs that contain information about a game session. When included in a game session request, these properties communicate details to be used when setting up the new game session. For example, a game property might specify a game mode, level, or map. Game properties are passed to the game server process when initiating a new game session. For more information, see the Amazon GameLift Developer Guide.

" }, - "GamePropertyKey":{ + "GamePropertyKey":{ + "type":"string", + "max":32 + }, + "GamePropertyList":{ + "type":"list", + "member":{"shape":"GameProperty"}, + "max":16 + }, + "GamePropertyValue":{ + "type":"string", + "max":96 + }, + "GameServer":{ + "type":"structure", + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupName", + "documentation":"

The name identifier for the game server group where the game server is located.

" + }, + "GameServerGroupArn":{ + "shape":"GameServerGroupArn", + "documentation":"

The ARN identifier for the game server group where the game server is located.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

A custom string that uniquely identifies the game server. Game server IDs are developer-defined and are unique across all game server groups in an AWS account.

" + }, + "InstanceId":{ + "shape":"GameServerInstanceId", + "documentation":"

The unique identifier for the instance where the game server is located.

" + }, + "ConnectionInfo":{ + "shape":"GameServerConnectionInfo", + "documentation":"

The port and IP address that must be used to establish a client connection to the game server.

" + }, + "GameServerData":{ + "shape":"GameServerData", + "documentation":"

A set of custom game server properties, formatted as a single string value. This data is passed to a game client or service in response to requests ListGameServers or ClaimGameServer. This property can be updated using UpdateGameServer.

" + }, + "CustomSortKey":{ + "shape":"GameServerSortKey", + "documentation":"

A game server tag that can be used to request sorted lists of game servers when calling ListGameServers. Custom sort keys are developer-defined. This property can be updated using UpdateGameServer.

" + }, + "ClaimStatus":{ + "shape":"GameServerClaimStatus", + "documentation":"

Indicates when an available game server has been reserved but has not yet started hosting a game. Once it is claimed, the game server remains in CLAIMED status for a maximum of one minute. During this time, game clients must connect to the game server and start the game, which triggers the game server to update its utilization status. After one minute, the game server claim status reverts to null.

" + }, + "UtilizationStatus":{ + "shape":"GameServerUtilizationStatus", + "documentation":"

Indicates whether the game server is currently available for new games or is busy. Possible statuses include:

  • AVAILABLE - The game server is available to be claimed. A game server that has been claimed remains in this status until it reports game hosting activity.

  • IN_USE - The game server is currently hosting a game session with players.

" + }, + "RegistrationTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating when the game server resource was created with a RegisterGameServer request. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + }, + "LastClaimTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating the last time the game server was claimed with a ClaimGameServer request. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\"). This value is used to calculate when the game server's claim status should revert to null.

" + }, + "LastHealthCheckTime":{ + "shape":"Timestamp", + "documentation":"

Time stamp indicating the last time the game server was updated with health status using an UpdateGameServer request. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\"). After game server registration, this property is only changed when a game server update specifies a health check value.

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Properties describing a game server resource.

A game server resource is created by a successful call to RegisterGameServer and deleted by calling DeregisterGameServer.

" + }, + "GameServerClaimStatus":{ + "type":"string", + "enum":["CLAIMED"] + }, + "GameServerConnectionInfo":{ + "type":"string", + "max":512, + "min":1, + "pattern":".*\\S.*" + }, + "GameServerData":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*\\S.*" + }, + "GameServerGroup":{ + "type":"structure", + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupName", + "documentation":"

A developer-defined identifier for the game server group. The name is unique per Region per AWS account.

" + }, + "GameServerGroupArn":{ + "shape":"GameServerGroupArn", + "documentation":"

A generated unique ID for the game server group.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling groups. The submitted role is validated to ensure that it contains the necessary permissions for game server groups.

" + }, + "InstanceDefinitions":{ + "shape":"InstanceDefinitions", + "documentation":"

The set of EC2 instance types that GameLift FleetIQ can use when rebalancing and autoscaling instances in the group.

" + }, + "BalancingStrategy":{ + "shape":"BalancingStrategy", + "documentation":"

The fallback balancing method to use for the game server group when Spot instances in a Region become unavailable or are not viable for game hosting. Once triggered, this method remains active until Spot instances can once again be used. Method options include:

  • SPOT_ONLY -- If Spot instances are unavailable, the game server group provides no hosting capacity. No new instances are started, and the existing nonviable Spot instances are terminated (once current gameplay ends) and not replaced.

  • SPOT_PREFERRED -- If Spot instances are unavailable, the game server group continues to provide hosting capacity by using On-Demand instances. Existing nonviable Spot instances are terminated (once current gameplay ends) and replaced with new On-Demand instances.

" + }, + "GameServerProtectionPolicy":{ + "shape":"GameServerProtectionPolicy", + "documentation":"

A flag that indicates whether instances in the game server group are protected from early termination. Unprotected instances that have active game servers running may be terminated during a scale-down event, causing players to be dropped from the game. Protected instances cannot be terminated while there are active game servers running except in the event of a forced game server group deletion (see DeleteGameServerGroup). An exception to this is Spot Instances, which may be terminated by AWS regardless of protection status.

" + }, + "AutoScalingGroupArn":{ + "shape":"AutoScalingGroupArn", + "documentation":"

A generated unique ID for the EC2 Auto Scaling group that is associated with this game server group.

" + }, + "Status":{ + "shape":"GameServerGroupStatus", + "documentation":"

The current status of the game server group. Possible statuses include:

  • NEW - GameLift FleetIQ has validated the CreateGameServerGroup() request.

  • ACTIVATING - GameLift FleetIQ is setting up a game server group, which includes creating an autoscaling group in your AWS account.

  • ACTIVE - The game server group has been successfully created.

  • DELETE_SCHEDULED - A request to delete the game server group has been received.

  • DELETING - GameLift FleetIQ has received a valid DeleteGameServerGroup() request and is processing it. GameLift FleetIQ must first complete and release hosts before it deletes the autoscaling group and the game server group.

  • DELETED - The game server group has been successfully deleted.

  • ERROR - The asynchronous processes of activating or deleting a game server group has failed, resulting in an error state.

" + }, + "StatusReason":{ + "shape":"NonZeroAndMaxString", + "documentation":"

Additional information about the current game server group status. This information may provide additional insight on groups that are in ERROR status.

" + }, + "SuspendedActions":{ + "shape":"GameServerGroupActions", + "documentation":"

A list of activities that are currently suspended for this game server group. If this property is empty, all activities are occurring.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

A time stamp indicating when this game server group was last updated.

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Properties describing a game server group resource. A game server group manages certain properties of a corresponding EC2 Auto Scaling group.

A game server group is created by a successful call to CreateGameServerGroup and deleted by calling DeleteGameServerGroup. Game server group activity can be temporarily suspended and resumed by calling SuspendGameServerGroup and ResumeGameServerGroup.

" + }, + "GameServerGroupAction":{ + "type":"string", + "enum":["REPLACE_INSTANCE_TYPES"] + }, + "GameServerGroupActions":{ + "type":"list", + "member":{"shape":"GameServerGroupAction"}, + "max":1, + "min":1 + }, + "GameServerGroupArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn:.*:gameservergroup\\/[a-zA-Z0-9-\\.]*" + }, + "GameServerGroupAutoScalingPolicy":{ + "type":"structure", + "required":["TargetTrackingConfiguration"], + "members":{ + "EstimatedInstanceWarmup":{ + "shape":"PositiveInteger", + "documentation":"

Length of time, in seconds, it takes for a new instance to start new game server processes and register with GameLift FleetIQ. Specifying a warm-up time can be useful, particularly with game servers that take a long time to start up, because it avoids prematurely starting new instances.

" + }, + "TargetTrackingConfiguration":{ + "shape":"TargetTrackingConfiguration", + "documentation":"

Settings for a target-based scaling policy applied to Auto Scaling group. These settings are used to create a target-based policy that tracks the GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Configuration settings for intelligent autoscaling that uses target tracking. An autoscaling policy can be specified when a new game server group is created with CreateGameServerGroup. If a group has an autoscaling policy, the Auto Scaling group takes action based on this policy, in addition to (and potentially in conflict with) any other autoscaling policies that are separately applied to the Auto Scaling group.

" + }, + "GameServerGroupDeleteOption":{ + "type":"string", + "enum":[ + "SAFE_DELETE", + "FORCE_DELETE", + "RETAIN" + ] + }, + "GameServerGroupInstanceType":{ + "type":"string", + "enum":[ + "c4.large", + "c4.xlarge", + "c4.2xlarge", + "c4.4xlarge", + "c4.8xlarge", + "c5.large", + "c5.xlarge", + "c5.2xlarge", + "c5.4xlarge", + "c5.9xlarge", + "c5.12xlarge", + "c5.18xlarge", + "c5.24xlarge", + "r4.large", + "r4.xlarge", + "r4.2xlarge", + "r4.4xlarge", + "r4.8xlarge", + "r4.16xlarge", + "r5.large", + "r5.xlarge", + "r5.2xlarge", + "r5.4xlarge", + "r5.8xlarge", + "r5.12xlarge", + "r5.16xlarge", + "r5.24xlarge", + "m4.large", + "m4.xlarge", + "m4.2xlarge", + "m4.4xlarge", + "m4.10xlarge", + "m5.large", + "m5.xlarge", + "m5.2xlarge", + "m5.4xlarge", + "m5.8xlarge", + "m5.12xlarge", + "m5.16xlarge", + "m5.24xlarge" + ] + }, + "GameServerGroupName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]+" + }, + "GameServerGroupNameOrArn":{ "type":"string", - "max":32 + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]+|^arn:.*:gameservergroup\\/[a-zA-Z0-9-\\.]+" }, - "GamePropertyList":{ + "GameServerGroupStatus":{ + "type":"string", + "enum":[ + "NEW", + "ACTIVATING", + "ACTIVE", + "DELETE_SCHEDULED", + "DELETING", + "DELETED", + "ERROR" + ] + }, + "GameServerGroups":{ "type":"list", - "member":{"shape":"GameProperty"}, - "max":16 + "member":{"shape":"GameServerGroup"} }, - "GamePropertyValue":{ + "GameServerHealthCheck":{ "type":"string", - "max":96 + "enum":["HEALTHY"] + }, + "GameServerId":{ + "type":"string", + "max":128, + "min":3, + "pattern":"[a-zA-Z0-9-\\.]+" + }, + "GameServerInstanceId":{ + "type":"string", + "max":19, + "min":19, + "pattern":"^i-[0-9a-zA-Z]{17}$" + }, + "GameServerProtectionPolicy":{ + "type":"string", + "enum":[ + "NO_PROTECTION", + "FULL_PROTECTION" + ] + }, + "GameServerSortKey":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-\\.]+" + }, + "GameServerUtilizationStatus":{ + "type":"string", + "enum":[ 
+ "AVAILABLE", + "UTILIZED" + ] + }, + "GameServers":{ + "type":"list", + "member":{"shape":"GameServer"} }, "GameSession":{ "type":"structure", @@ -3292,7 +3993,7 @@ "documentation":"

A unique identifier for a fleet that the game session is running on.

" }, "FleetArn":{ - "shape":"ArnStringModel", + "shape":"FleetArn", "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

" }, "CreationTime":{ @@ -3516,7 +4217,7 @@ "documentation":"

A descriptive label that is associated with game session queue. Queue names must be unique within each Region.

" }, "GameSessionQueueArn":{ - "shape":"ArnStringModel", + "shape":"GameSessionQueueArn", "documentation":"

Amazon Resource Name (ARN) that is assigned to a GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift game session queue ARN, the resource ID matches the Name value.

" }, "TimeoutInSeconds":{ @@ -3534,6 +4235,12 @@ }, "documentation":"

Configuration of a queue that is used to process game session placement requests. The queue configuration identifies several game features:

  • The destinations where a new game session can potentially be hosted. Amazon GameLift tries these destinations in an order based on either the queue's default order or player latency information, if provided in a placement request. With latency information, Amazon GameLift can place game sessions where the majority of players are reporting the lowest possible latency.

  • The length of time that placement requests can wait in the queue before timing out.

  • A set of optional latency policies that protect individual players from high latencies, preventing game sessions from being placed where any individual player is reporting latency higher than a policy's maximum.

" }, + "GameSessionQueueArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn:.*:gamesessionqueue\\/[a-zA-Z0-9-]+" + }, "GameSessionQueueDestination":{ "type":"structure", "members":{ @@ -3553,14 +4260,20 @@ "member":{"shape":"GameSessionQueue"} }, "GameSessionQueueName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-]+" + }, + "GameSessionQueueNameOrArn":{ "type":"string", "max":256, "min":1, "pattern":"[a-zA-Z0-9-]+|^arn:.*:gamesessionqueue\\/[a-zA-Z0-9-]+" }, - "GameSessionQueueNameList":{ + "GameSessionQueueNameOrArnList":{ "type":"list", - "member":{"shape":"GameSessionQueueName"} + "member":{"shape":"GameSessionQueueNameOrArn"} }, "GameSessionStatus":{ "type":"string", @@ -3605,7 +4318,7 @@ ], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

" }, "InstanceId":{ @@ -3625,6 +4338,12 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "IamRoleArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^arn:.*:role\\/[\\w+=,.@-]+" + }, "IdStringModel":{ "type":"string", "max":48, @@ -3718,6 +4437,27 @@ "documentation":"

Set of credentials required to remotely access a fleet instance. Access credentials are requested by calling GetInstanceAccess and returned in an InstanceAccess object.

", "sensitive":true }, + "InstanceDefinition":{ + "type":"structure", + "required":["InstanceType"], + "members":{ + "InstanceType":{ + "shape":"GameServerGroupInstanceType", + "documentation":"

An EC2 instance type designation.

" + }, + "WeightedCapacity":{ + "shape":"WeightedCapacity", + "documentation":"

Instance weighting that indicates how much this instance type contributes to the total capacity of a game server group. Instance weights are used by GameLift FleetIQ to calculate the instance type's cost per unit hour and better identify the most cost-effective options. For detailed information on weighting instance capacity, see Instance Weighting in the Amazon EC2 Auto Scaling User Guide. Default value is \"1\".

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

An allowed instance type for your game server group. GameLift FleetIQ periodically evaluates each defined instance type for viability. It then updates the Auto Scaling group with the list of viable instance types.

" + }, + "InstanceDefinitions":{ + "type":"list", + "member":{"shape":"InstanceDefinition"}, + "max":20, + "min":2 + }, "InstanceId":{ "type":"string", "pattern":"[a-zA-Z0-9\\.-]+" @@ -3814,6 +4554,42 @@ "key":{"shape":"NonEmptyString"}, "value":{"shape":"PositiveInteger"} }, + "LaunchTemplateId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]+" + }, + "LaunchTemplateName":{ + "type":"string", + "max":128, + "min":3, + "pattern":"[a-zA-Z0-9\\(\\)\\.\\-/_]+" + }, + "LaunchTemplateSpecification":{ + "type":"structure", + "members":{ + "LaunchTemplateId":{ + "shape":"LaunchTemplateId", + "documentation":"

A unique identifier for an existing EC2 launch template.

" + }, + "LaunchTemplateName":{ + "shape":"LaunchTemplateName", + "documentation":"

A readable identifier for an existing EC2 launch template.

" + }, + "Version":{ + "shape":"LaunchTemplateVersion", + "documentation":"

The version of the EC2 launch template to use. If no version is specified, the default version will be used. EC2 allows you to specify a default version for a launch template, if none is set, the default is the first version created.

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

An EC2 launch template that contains configuration settings and game server code to be deployed to all instances in a game server group.

" + }, + "LaunchTemplateVersion":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]+" + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -3881,7 +4657,7 @@ "members":{ "Builds":{ "shape":"BuildList", - "documentation":"

A collection of build records that match the request.

" + "documentation":"

A collection of build resources that match the request.

" }, "NextToken":{ "shape":"NonEmptyString", @@ -3894,12 +4670,12 @@ "type":"structure", "members":{ "BuildId":{ - "shape":"BuildId", - "documentation":"

A unique identifier for a build to return fleets for. Use this parameter to return only fleets using the specified build. Use either the build ID or ARN value.To retrieve all fleets, leave this parameter empty.

" + "shape":"BuildIdOrArn", + "documentation":"

A unique identifier for a build to return fleets for. Use this parameter to return only fleets using a specified build. Use either the build ID or ARN value. To retrieve all fleets, do not include either a BuildId or a ScriptId parameter.

" }, "ScriptId":{ - "shape":"ScriptId", - "documentation":"

A unique identifier for a Realtime script to return fleets for. Use this parameter to return only fleets using the specified script. Use either the script ID or ARN value.To retrieve all fleets, leave this parameter empty.

" + "shape":"ScriptIdOrArn", + "documentation":"

A unique identifier for a Realtime script to return fleets for. Use this parameter to return only fleets using a specified script. Use either the script ID or ARN value. To retrieve all fleets, leave this parameter empty.

" }, "Limit":{ "shape":"PositiveInteger", @@ -3926,6 +4702,67 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "ListGameServerGroupsInput":{ + "type":"structure", + "members":{ + "Limit":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + } + } + }, + "ListGameServerGroupsOutput":{ + "type":"structure", + "members":{ + "GameServerGroups":{ + "shape":"GameServerGroups", + "documentation":"

A collection of game server group objects that match the request.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + } + } + }, + "ListGameServersInput":{ + "type":"structure", + "required":["GameServerGroupName"], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group for the game server you want to list. Use either the GameServerGroup name or ARN value.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

Indicates how to sort the returned data based on the game servers' custom key sort value. If this parameter is left empty, the list of game servers is returned in no particular order.

" + }, + "Limit":{ + "shape":"PositiveInteger", + "documentation":"

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this action. To start at the beginning of the result set, do not specify a value.

" + } + } + }, + "ListGameServersOutput":{ + "type":"structure", + "members":{ + "GameServers":{ + "shape":"GameServers", + "documentation":"

A collection of game server objects that match the request.

" + }, + "NextToken":{ + "shape":"NonZeroAndMaxString", + "documentation":"

A token that indicates where to resume retrieving results on the next call to this action. If no token is returned, these results represent the end of the list.

" + } + } + }, "ListScriptsInput":{ "type":"structure", "members":{ @@ -4258,6 +5095,10 @@ "type":"string", "min":1 }, + "NonNegativeDouble":{ + "type":"double", + "min":0 + }, "NonZeroAndMaxString":{ "type":"string", "max":1024, @@ -4279,6 +5120,14 @@ "AMAZON_LINUX_2" ] }, + "OutOfCapacityException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"} + }, + "documentation":"

The specified game server group has no available game servers to fulfill a ClaimGameServer request. Clients can retry such requests immediately or after a waiting period.

", + "exception":true + }, "PlacedPlayerSession":{ "type":"structure", "members":{ @@ -4404,7 +5253,7 @@ "documentation":"

A unique identifier for a fleet that the player's game session is running on.

" }, "FleetArn":{ - "shape":"ArnStringModel", + "shape":"FleetArn", "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet that the player's game session is running on.

" }, "CreationTime":{ @@ -4502,7 +5351,7 @@ "documentation":"

A descriptive label that is associated with a scaling policy. Policy names do not need to be unique. A fleet can have only one scaling policy with the same name.

" }, "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to apply this policy to. You can use either the fleet ID or ARN value. The fleet cannot be in any of the following statuses: ERROR or DELETING.

" }, "ScalingAdjustment":{ @@ -4554,12 +5403,59 @@ "type":"list", "member":{"shape":"ArnStringModel"} }, + "RegisterGameServerInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "GameServerId", + "InstanceId" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group where the game server is running. You can use either the GameServerGroup name or ARN value.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

A custom string that uniquely identifies the new game server. Game server IDs are developer-defined and must be unique across all game server groups in your AWS account.

" + }, + "InstanceId":{ + "shape":"GameServerInstanceId", + "documentation":"

The unique identifier for the instance where the game server is running. This ID is available in the instance metadata.

" + }, + "ConnectionInfo":{ + "shape":"GameServerConnectionInfo", + "documentation":"

Information needed to make inbound client connections to the game server. This might include IP address and port, DNS name, etc.

" + }, + "GameServerData":{ + "shape":"GameServerData", + "documentation":"

A set of custom game server properties, formatted as a single string value. This data is passed to a game client or service when it requests information on game servers using ListGameServers or ClaimGameServer.

" + }, + "CustomSortKey":{ + "shape":"GameServerSortKey", + "documentation":"

A game server tag that can be used to request sorted lists of game servers using ListGameServers. Custom sort keys are developer-defined based on how you want to organize the retrieved game server information.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of labels to assign to the new game server resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see Tagging AWS Resources in the AWS General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.

" + } + } + }, + "RegisterGameServerOutput":{ + "type":"structure", + "members":{ + "GameServer":{ + "shape":"GameServer", + "documentation":"

Object that describes the newly created game server resource.

" + } + } + }, "RequestUploadCredentialsInput":{ "type":"structure", "required":["BuildId"], "members":{ "BuildId":{ - "shape":"BuildId", + "shape":"BuildIdOrArn", "documentation":"

A unique identifier for a build to get credentials for. You can use either the build ID or ARN value.

" } }, @@ -4584,7 +5480,7 @@ "required":["AliasId"], "members":{ "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

The unique identifier of the alias that you want to retrieve a fleet ID for. You can use either the alias ID or ARN value.

" } }, @@ -4598,7 +5494,7 @@ "documentation":"

The fleet identifier that the alias is pointing to.

" }, "FleetArn":{ - "shape":"ArnStringModel", + "shape":"FleetArn", "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource that this alias points to.

" } }, @@ -4618,6 +5514,32 @@ }, "documentation":"

A policy that limits the number of game sessions a player can create on the same fleet. This optional policy gives game owners control over how players can consume available game server resources. A resource creation policy makes the following statement: \"An individual player can create a maximum number of new game sessions within a specified time period\".

The policy is evaluated when a player tries to create a new game session. For example: Assume you have a policy of 10 new game sessions and a time period of 60 minutes. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than 10 game sessions in the past 60 minutes.

" }, + "ResumeGameServerGroupInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "ResumeActions" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

The unique identifier of the game server group to resume activity on. Use either the GameServerGroup name or ARN value.

" + }, + "ResumeActions":{ + "shape":"GameServerGroupActions", + "documentation":"

The action to resume for this game server group.

" + } + } + }, + "ResumeGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

An object that describes the game server group resource, with the SuspendedActions property updated to reflect the resumed activity.

" + } + } + }, "RoutingStrategy":{ "type":"structure", "members":{ @@ -4669,14 +5591,14 @@ "documentation":"

The maximum amount of time (in seconds) that a game session can remain in status ACTIVATING. If the game session is not active before the timeout, activation is terminated and the game session status is changed to TERMINATED.

" } }, - "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instance in the fleet starts the specified server processes and continues to start new processes as existing processes end. Each instance regularly checks for an updated runtime configuration.

The runtime configuration enables the instances in a fleet to run multiple processes simultaneously. Learn more about Running Multiple Processes on a Fleet .

A Amazon GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object.

" + "documentation":"

A collection of server process configurations that describe what processes to run on each instance in a fleet. Server processes run either a custom game build executable or a Realtime Servers script. Each instance in the fleet starts the specified server processes and continues to start new processes as existing processes end. Each instance regularly checks for an updated runtime configuration.

The runtime configuration enables the instances in a fleet to run multiple processes simultaneously. Learn more about Running Multiple Processes on a Fleet .

An Amazon GameLift instance is limited to 50 processes running simultaneously. To calculate the total number of processes in a runtime configuration, add the values of the ConcurrentExecutions parameter for each ServerProcess object.

" }, "S3Location":{ "type":"structure", "members":{ "Bucket":{ "shape":"NonEmptyString", - "documentation":"

An Amazon S3 bucket identifier. This is the name of the S3 bucket.

" + "documentation":"

An S3 bucket identifier. This is the name of the S3 bucket.

" }, "Key":{ "shape":"NonEmptyString", @@ -4691,7 +5613,7 @@ "documentation":"

The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file. If not set, the latest version of the file is retrieved.

" } }, - "documentation":"

The location in Amazon S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

" + "documentation":"

The location in S3 where build or script files are stored for access by Amazon GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript requests.

" }, "ScalingAdjustmentType":{ "type":"string", @@ -4803,6 +5725,10 @@ "pattern":"^arn:.*:script\\/script-\\S+" }, "ScriptId":{ + "type":"string", + "pattern":"^script-\\S+" + }, + "ScriptIdOrArn":{ "type":"string", "pattern":"^script-\\S+|^arn:.*:script\\/script-\\S+" }, @@ -4814,11 +5740,11 @@ "type":"structure", "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to search for active game sessions. You can use either the fleet ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier for an alias associated with the fleet to search for active game sessions. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both.

" }, "FilterExpression":{ @@ -4888,6 +5814,13 @@ "min":0, "pattern":"[a-zA-Z0-9:_/-]*" }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "StartFleetActionsInput":{ "type":"structure", "required":[ @@ -4896,7 +5829,7 @@ ], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to start actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ @@ -4923,8 +5856,8 @@ "documentation":"

A unique identifier to assign to the new game session placement. This value is developer-defined. The value must be unique across all Regions and cannot be reused unless you are resubmitting a canceled or timed-out placement request.

" }, "GameSessionQueueName":{ - "shape":"GameSessionQueueName", - "documentation":"

Name of the queue to use to place the new game session. You can use either the qieue name or ARN value.

" + "shape":"GameSessionQueueNameOrArn", + "documentation":"

Name of the queue to use to place the new game session. You can use either the queue name or ARN value.

" }, "GameProperties":{ "shape":"GamePropertyList", @@ -5040,7 +5973,7 @@ ], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to stop actions on. You can use either the fleet ID or ARN value.

" }, "Actions":{ @@ -5101,6 +6034,32 @@ "member":{"shape":"NonZeroAndMaxString"} }, "StringModel":{"type":"string"}, + "SuspendGameServerGroupInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "SuspendActions" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

The unique identifier of the game server group to stop activity on. Use either the GameServerGroup name or ARN value.

" + }, + "SuspendActions":{ + "shape":"GameServerGroupActions", + "documentation":"

The action to suspend for this game server group.

" + } + } + }, + "SuspendGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

An object that describes the game server group resource, with the SuspendedActions property updated to reflect the suspended activity.

" + } + } + }, "Tag":{ "type":"structure", "required":[ @@ -5182,6 +6141,17 @@ }, "documentation":"

Settings for a target-based scaling policy (see ScalingPolicy). A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target-based policy, including the target value.

" }, + "TargetTrackingConfiguration":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "TargetValue":{ + "shape":"NonNegativeDouble", + "documentation":"

Desired value to use with a game server group target-based scaling policy.

" + } + }, + "documentation":"

This data type is part of Amazon GameLift FleetIQ with game server groups, which is in preview release and is subject to change.

Settings for a target-based scaling policy applied to Auto Scaling group. These settings are used to create a target-based policy that tracks the GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

" + }, "TerminalRoutingStrategyException":{ "type":"structure", "members":{ @@ -5216,11 +6186,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) that is assigned to and uniquely identifies the GameLift resource that you want to remove tags from. GameLift resource ARNs are included in the data object for the resource, which can be retrieved by calling a List or Describe action for the resource type.

" + "documentation":"

The Amazon Resource Name (ARN) that is assigned to and uniquely identifies the GameLift resource that you want to remove tags from. GameLift resource ARNs are included in the data object for the resource, which can be retrieved by calling a List or Describe action for the resource type.

" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

A list of one or more tags to remove from the specified GameLift resource. Tags are developer-defined and structured as key-value pairs.

" + "documentation":"

A list of one or more tag keys to remove from the specified GameLift resource. An AWS resource can have only one tag with a specific tag key, so specifying the tag key identifies which tag to remove.

" } } }, @@ -5234,7 +6204,7 @@ "required":["AliasId"], "members":{ "AliasId":{ - "shape":"AliasId", + "shape":"AliasIdOrArn", "documentation":"

A unique identifier for the alias that you want to update. You can use either the alias ID or ARN value.

" }, "Name":{ @@ -5267,7 +6237,7 @@ "required":["BuildId"], "members":{ "BuildId":{ - "shape":"BuildId", + "shape":"BuildIdOrArn", "documentation":"

A unique identifier for a build to update. You can use either the build ID or ARN value.

" }, "Name":{ @@ -5286,7 +6256,7 @@ "members":{ "Build":{ "shape":"Build", - "documentation":"

The updated build record.

" + "documentation":"

The updated build resource.

" } }, "documentation":"

Represents the returned data in response to a request action.

" @@ -5296,7 +6266,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to update attribute metadata for. You can use either the fleet ID or ARN value.

" }, "Name":{ @@ -5337,7 +6307,7 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to update capacity for. You can use either the fleet ID or ARN value.

" }, "DesiredInstances":{ @@ -5370,16 +6340,16 @@ "required":["FleetId"], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to update port settings for. You can use either the fleet ID or ARN value.

" }, "InboundPermissionAuthorizations":{ "shape":"IpPermissionsList", - "documentation":"

A collection of port settings to be added to the fleet record.

" + "documentation":"

A collection of port settings to be added to the fleet resource.

" }, "InboundPermissionRevocations":{ "shape":"IpPermissionsList", - "documentation":"

A collection of port settings to be removed from the fleet record.

" + "documentation":"

A collection of port settings to be removed from the fleet resource.

" } }, "documentation":"

Represents the input for a request action.

" @@ -5394,6 +6364,83 @@ }, "documentation":"

Represents the returned data in response to a request action.

" }, + "UpdateGameServerGroupInput":{ + "type":"structure", + "required":["GameServerGroupName"], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

The unique identifier of the game server group to update. Use either the GameServerGroup name or ARN value.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling groups. The submitted role is validated to ensure that it contains the necessary permissions for game server groups.

" + }, + "InstanceDefinitions":{ + "shape":"InstanceDefinitions", + "documentation":"

An updated list of EC2 instance types to use when creating instances in the group. The instance definition must specify instance types that are supported by GameLift FleetIQ, and must include at least two instance types. This updated list replaces the entire current list of instance definitions for the game server group. For more information on instance types, see EC2 Instance Types in the Amazon EC2 User Guide.

" + }, + "GameServerProtectionPolicy":{ + "shape":"GameServerProtectionPolicy", + "documentation":"

A flag that indicates whether instances in the game server group are protected from early termination. Unprotected instances that have active game servers running may be terminated during a scale-down event, causing players to be dropped from the game. Protected instances cannot be terminated while there are active game servers running. An exception to this is Spot Instances, which may be terminated by AWS regardless of protection status. This property is set to NO_PROTECTION by default.

" + }, + "BalancingStrategy":{ + "shape":"BalancingStrategy", + "documentation":"

The fallback balancing method to use for the game server group when Spot instances in a Region become unavailable or are not viable for game hosting. Once triggered, this method remains active until Spot instances can once again be used. Method options include:

  • SPOT_ONLY -- If Spot instances are unavailable, the game server group provides no hosting capacity. No new instances are started, and the existing nonviable Spot instances are terminated (once current gameplay ends) and not replaced.

  • SPOT_PREFERRED -- If Spot instances are unavailable, the game server group continues to provide hosting capacity by using On-Demand instances. Existing nonviable Spot instances are terminated (once current gameplay ends) and replaced with new On-Demand instances.

" + } + } + }, + "UpdateGameServerGroupOutput":{ + "type":"structure", + "members":{ + "GameServerGroup":{ + "shape":"GameServerGroup", + "documentation":"

An object that describes the game server group resource with updated properties.

" + } + } + }, + "UpdateGameServerInput":{ + "type":"structure", + "required":[ + "GameServerGroupName", + "GameServerId" + ], + "members":{ + "GameServerGroupName":{ + "shape":"GameServerGroupNameOrArn", + "documentation":"

An identifier for the game server group where the game server is running. Use either the GameServerGroup name or ARN value.

" + }, + "GameServerId":{ + "shape":"GameServerId", + "documentation":"

The identifier for the game server to be updated.

" + }, + "GameServerData":{ + "shape":"GameServerData", + "documentation":"

A set of custom game server properties, formatted as a single string value. This data is passed to a game client or service when it requests information on a game servers using DescribeGameServer or ClaimGameServer.

" + }, + "CustomSortKey":{ + "shape":"GameServerSortKey", + "documentation":"

A game server tag that can be used to request sorted lists of game servers using ListGameServers. Custom sort keys are developer-defined based on how you want to organize the retrieved game server information.

" + }, + "UtilizationStatus":{ + "shape":"GameServerUtilizationStatus", + "documentation":"

Indicates whether the game server is available or is currently hosting gameplay.

" + }, + "HealthCheck":{ + "shape":"GameServerHealthCheck", + "documentation":"

Indicates health status of the game server. An update that explicitly includes this parameter updates the game server's LastHealthCheckTime time stamp.

" + } + } + }, + "UpdateGameServerOutput":{ + "type":"structure", + "members":{ + "GameServer":{ + "shape":"GameServer", + "documentation":"

Object that describes the newly updated game server resource.

" + } + } + }, "UpdateGameSessionInput":{ "type":"structure", "required":["GameSessionId"], @@ -5436,7 +6483,7 @@ "required":["Name"], "members":{ "Name":{ - "shape":"GameSessionQueueName", + "shape":"GameSessionQueueNameOrArn", "documentation":"

A descriptive label that is associated with game session queue. Queue names must be unique within each Region. You can use either the queue ID or ARN value.

" }, "TimeoutInSeconds":{ @@ -5541,7 +6588,7 @@ ], "members":{ "FleetId":{ - "shape":"FleetId", + "shape":"FleetIdOrArn", "documentation":"

A unique identifier for a fleet to update runtime configuration for. You can use either the fleet ID or ARN value.

" }, "RuntimeConfiguration":{ @@ -5566,7 +6613,7 @@ "required":["ScriptId"], "members":{ "ScriptId":{ - "shape":"ScriptId", + "shape":"ScriptIdOrArn", "documentation":"

A unique identifier for a Realtime script to update. You can use either the script ID or ARN value.

" }, "Name":{ @@ -5655,7 +6702,7 @@ "documentation":"

A unique identifier for a fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

" }, "FleetArn":{ - "shape":"ArnStringModel", + "shape":"FleetArn", "documentation":"

The Amazon Resource Name (ARN) associated with the GameLift fleet resource for this connection.

" }, "IpV4CidrBlock":{ @@ -5699,6 +6746,24 @@ }, "documentation":"

Represents status information for a VPC peering connection. Status is associated with a VpcPeeringConnection object. Status codes and messages are provided from EC2 (see VpcPeeringConnectionStateReason). Connection status information is also communicated as a fleet Event.

" }, + "VpcSubnet":{ + "type":"string", + "max":15, + "min":15, + "pattern":"^subnet-[0-9a-z]{8}$" + }, + "VpcSubnets":{ + "type":"list", + "member":{"shape":"VpcSubnet"}, + "max":20, + "min":1 + }, + "WeightedCapacity":{ + "type":"string", + "max":3, + "min":1, + "pattern":"^[\\u0031-\\u0039][\\u0030-\\u0039]{0,2}$" + }, "WholeNumber":{ "type":"integer", "min":0 @@ -5708,5 +6773,5 @@ "max":5000000 } }, - "documentation":"Amazon GameLift Service

Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.

When setting up hosting resources, you can deploy your custom game server or use the Amazon GameLift Realtime Servers. Realtime Servers gives you the ability to quickly stand up lightweight, efficient game servers with the core Amazon GameLift infrastructure already built in.

Get Amazon GameLift Tools and Resources

This reference guide describes the low-level service API for Amazon GameLift and provides links to language-specific SDK reference topics. See also Amazon GameLift Tools and Resources.

API Summary

The Amazon GameLift service API includes two key sets of actions:

  • Manage game sessions and player access -- Integrate this functionality into game client services in order to create new game sessions, retrieve information on existing game sessions; reserve a player slot in a game session, request matchmaking, etc.

  • Configure and manage game server resources -- Manage your Amazon GameLift hosting resources, including builds, scripts, fleets, queues, and aliases. Set up matchmakers, configure auto-scaling, retrieve game logs, and get hosting and game metrics.

Task-based list of API actions

" + "documentation":"Amazon GameLift Service

Amazon GameLift provides a range of multiplayer game hosting solutions. As a fully managed service, GameLift helps you:

  • Set up EC2-based computing resources and use GameLift FleetIQ to deploy your game servers on low-cost, reliable Spot instances.

  • Track game server availability and route players into game sessions to minimize latency.

  • Automatically scale your resources to meet player demand and manage costs.

  • Optionally add FlexMatch matchmaking.

With GameLift as a managed service, you have the option to deploy your custom game server or use Amazon GameLift Realtime Servers to quickly stand up lightweight game servers for your game. Realtime Servers provides an efficient game server framework with core Amazon GameLift infrastructure already built in.

Now in Public Preview:

Use GameLift FleetIQ as a standalone feature with EC2 instances and Auto Scaling groups. GameLift FleetIQ provides optimizations that make low-cost Spot instances viable for game hosting. This extension of GameLift FleetIQ gives you access to these optimizations while managing your EC2 instances and Auto Scaling groups within your own AWS account.

Get Amazon GameLift Tools and Resources

This reference guide describes the low-level service API for Amazon GameLift and provides links to language-specific SDK reference topics. See also Amazon GameLift Tools and Resources.

API Summary

The Amazon GameLift service API includes two key sets of actions:

  • Manage game sessions and player access -- Integrate this functionality into game client services in order to create new game sessions, retrieve information on existing game sessions; reserve a player slot in a game session, request matchmaking, etc.

  • Configure and manage game server resources -- Manage your Amazon GameLift hosting resources, including builds, scripts, fleets, queues, and aliases. Set up matchmakers, configure auto-scaling, retrieve game logs, and get hosting and game metrics.

Task-based list of API actions

" } diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 928a1410fc8d..429e985910ae 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 7cbc3d80565d..f55090d79077 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 0a01b9916ece..f7d2a0a33d49 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 52737c774680..a1389e06d36c 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -481,6 +481,40 @@ ], "documentation":"

Removes a classifier from the Data Catalog.

" }, + "DeleteColumnStatisticsForPartition":{ + "name":"DeleteColumnStatisticsForPartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteColumnStatisticsForPartitionRequest"}, + "output":{"shape":"DeleteColumnStatisticsForPartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Deletes the partition column statistics of a column.

" + }, + "DeleteColumnStatisticsForTable":{ + "name":"DeleteColumnStatisticsForTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteColumnStatisticsForTableRequest"}, + "output":{"shape":"DeleteColumnStatisticsForTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Deletes table statistics of columns.

" + }, "DeleteConnection":{ "name":"DeleteConnection", "http":{ @@ -744,6 +778,40 @@ ], "documentation":"

Lists all classifier objects in the Data Catalog.

" }, + "GetColumnStatisticsForPartition":{ + "name":"GetColumnStatisticsForPartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetColumnStatisticsForPartitionRequest"}, + "output":{"shape":"GetColumnStatisticsForPartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Retrieves partition statistics of columns.

" + }, + "GetColumnStatisticsForTable":{ + "name":"GetColumnStatisticsForTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetColumnStatisticsForTableRequest"}, + "output":{"shape":"GetColumnStatisticsForTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Retrieves table statistics of columns.

" + }, "GetConnection":{ "name":"GetConnection", "http":{ @@ -1121,6 +1189,22 @@ ], "documentation":"

Gets code to perform a specified mapping.

" }, + "GetResourcePolicies":{ + "name":"GetResourcePolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourcePoliciesRequest"}, + "output":{"shape":"GetResourcePoliciesResponse"}, + "errors":[ + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Retrieves the security configurations for the resource policies set on individual resources, and also the account-level policy.

" + }, "GetResourcePolicy":{ "name":"GetResourcePolicy", "http":{ @@ -1774,6 +1858,23 @@ ], "documentation":"

Stops a specified trigger.

" }, + "StopWorkflowRun":{ + "name":"StopWorkflowRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopWorkflowRunRequest"}, + "output":{"shape":"StopWorkflowRunResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"IllegalWorkflowStateException"} + ], + "documentation":"

Stops the execution of the specified workflow run.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -1822,6 +1923,40 @@ ], "documentation":"

Modifies an existing classifier (a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field is present).

" }, + "UpdateColumnStatisticsForPartition":{ + "name":"UpdateColumnStatisticsForPartition", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateColumnStatisticsForPartitionRequest"}, + "output":{"shape":"UpdateColumnStatisticsForPartitionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Creates or updates partition statistics of columns.

" + }, + "UpdateColumnStatisticsForTable":{ + "name":"UpdateColumnStatisticsForTable", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateColumnStatisticsForTableRequest"}, + "output":{"shape":"UpdateColumnStatisticsForTableResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"GlueEncryptionException"} + ], + "documentation":"

Creates or updates table statistics of columns.

" + }, "UpdateConnection":{ "name":"UpdateConnection", "http":{ @@ -2507,7 +2642,54 @@ "type":"list", "member":{"shape":"BatchStopJobRunSuccessfulSubmission"} }, + "BinaryColumnStatisticsData":{ + "type":"structure", + "required":[ + "MaximumLength", + "AverageLength", + "NumberOfNulls" + ], + "members":{ + "MaximumLength":{ + "shape":"NonNegativeLong", + "documentation":"

Maximum length of the column.

" + }, + "AverageLength":{ + "shape":"NonNegativeDouble", + "documentation":"

Average length of the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + } + }, + "documentation":"

Defines binary column statistics data.

" + }, + "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, + "BooleanColumnStatisticsData":{ + "type":"structure", + "required":[ + "NumberOfTrues", + "NumberOfFalses", + "NumberOfNulls" + ], + "members":{ + "NumberOfTrues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of true values.

" + }, + "NumberOfFalses":{ + "shape":"NonNegativeLong", + "documentation":"

Number of false values.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + } + }, + "documentation":"

Defines boolean column statistics.

" + }, "BooleanNullable":{"type":"boolean"}, "BooleanValue":{"type":"boolean"}, "BoundedPartitionValueList":{ @@ -2791,6 +2973,24 @@ }, "documentation":"

A column in a Table.

" }, + "ColumnError":{ + "type":"structure", + "members":{ + "ColumnName":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + }, + "Error":{ + "shape":"ErrorDetail", + "documentation":"

The error message that occurred during the operation.

" + } + }, + "documentation":"

Defines a column containing error.

" + }, + "ColumnErrors":{ + "type":"list", + "member":{"shape":"ColumnError"} + }, "ColumnList":{ "type":"list", "member":{"shape":"Column"} @@ -2801,6 +3001,107 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "ColumnStatistics":{ + "type":"structure", + "required":[ + "ColumnName", + "ColumnType", + "AnalyzedTime", + "StatisticsData" + ], + "members":{ + "ColumnName":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + }, + "ColumnType":{ + "shape":"TypeString", + "documentation":"

The type of the column.

" + }, + "AnalyzedTime":{ + "shape":"Timestamp", + "documentation":"

The analyzed time of the column statistics.

" + }, + "StatisticsData":{ + "shape":"ColumnStatisticsData", + "documentation":"

The statistics of the column.

" + } + }, + "documentation":"

Defines column statistics.

" + }, + "ColumnStatisticsData":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"ColumnStatisticsType", + "documentation":"

The type of column statistics data.

" + }, + "BooleanColumnStatisticsData":{ + "shape":"BooleanColumnStatisticsData", + "documentation":"

Boolean Column Statistics Data.

" + }, + "DateColumnStatisticsData":{ + "shape":"DateColumnStatisticsData", + "documentation":"

Date Column Statistics Data.

" + }, + "DecimalColumnStatisticsData":{ + "shape":"DecimalColumnStatisticsData", + "documentation":"

Decimal Column Statistics Data.

" + }, + "DoubleColumnStatisticsData":{ + "shape":"DoubleColumnStatisticsData", + "documentation":"

Double Column Statistics Data.

" + }, + "LongColumnStatisticsData":{ + "shape":"LongColumnStatisticsData", + "documentation":"

Long Column Statistics Data.

" + }, + "StringColumnStatisticsData":{ + "shape":"StringColumnStatisticsData", + "documentation":"

String Column Statistics Data.

" + }, + "BinaryColumnStatisticsData":{ + "shape":"BinaryColumnStatisticsData", + "documentation":"

Binary Column Statistics Data.

" + } + }, + "documentation":"

Defines column statistics data.

" + }, + "ColumnStatisticsError":{ + "type":"structure", + "members":{ + "ColumnStatistics":{ + "shape":"ColumnStatistics", + "documentation":"

The ColumnStatistics of the column.

" + }, + "Error":{ + "shape":"ErrorDetail", + "documentation":"

The error message that occurred during the operation.

" + } + }, + "documentation":"

Defines a column containing an error.

" + }, + "ColumnStatisticsErrors":{ + "type":"list", + "member":{"shape":"ColumnStatisticsError"} + }, + "ColumnStatisticsList":{ + "type":"list", + "member":{"shape":"ColumnStatistics"} + }, + "ColumnStatisticsType":{ + "type":"string", + "enum":[ + "BOOLEAN", + "DATE", + "DECIMAL", + "DOUBLE", + "LONG", + "STRING", + "BINARY" + ] + }, "ColumnTypeString":{ "type":"string", "max":131072, @@ -2863,7 +3164,7 @@ }, "State":{ "shape":"JobRunState", - "documentation":"

The condition state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT, and FAILED.

" + "documentation":"

The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED, STOPPED, FAILED, and TIMEOUT. The only crawler states that a trigger can listen for are SUCCEEDED, FAILED, and CANCELLED.

" }, "CrawlerName":{ "shape":"NameString", @@ -2926,7 +3227,7 @@ }, "ConnectionType":{ "shape":"ConnectionType", - "documentation":"

The type of the connection. Currently, only JDBC is supported; SFTP is not supported.

" + "documentation":"

The type of the connection. Currently, SFTP is not supported.

" }, "MatchCriteria":{ "shape":"MatchCriteria", @@ -2934,7 +3235,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

" + "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

  • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", @@ -2973,7 +3274,7 @@ }, "ConnectionType":{ "shape":"ConnectionType", - "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

  • MONGODB - Designates a connection to a MongoDB document database.

SFTP is not supported.

" + "documentation":"

The type of the connection. Currently, these types are supported:

  • JDBC - Designates a connection to a database through Java Database Connectivity (JDBC).

  • KAFKA - Designates a connection to an Apache Kafka streaming platform.

  • MONGODB - Designates a connection to a MongoDB document database.

SFTP is not supported.

" }, "MatchCriteria":{ "shape":"MatchCriteria", @@ -3036,7 +3337,8 @@ "CUSTOM_JDBC_CERT", "SKIP_CUSTOM_JDBC_CERT_VALIDATION", "CUSTOM_JDBC_CERT_STRING", - "CONNECTION_URL" + "CONNECTION_URL", + "KAFKA_BOOTSTRAP_SERVERS" ] }, "ConnectionType":{ @@ -3044,7 +3346,8 @@ "enum":[ "JDBC", "SFTP", - "MONGODB" + "MONGODB", + "KAFKA" ] }, "ConnectionsList":{ @@ -3095,8 +3398,9 @@ "type":"string", "enum":[ "RUNNING", - "SUCCEEDED", + "CANCELLING", "CANCELLED", + "SUCCEEDED", "FAILED" ] }, @@ -3165,7 +3469,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -3380,7 +3684,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "Classifiers":{ "shape":"ClassifierNameList", @@ -3396,7 +3700,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -3404,7 +3708,7 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue.

" + "documentation":"

The tags to use with this crawler request. You may use tags to limit access to the crawler. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.

" } } }, @@ -3754,7 +4058,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

Specifies a JSON classifier for CreateClassifier to create.

" @@ -4220,10 +4524,32 @@ "CreateTableDefaultPermissions":{ "shape":"PrincipalPermissionsList", "documentation":"

Creates a set of default permissions on the table for principals.

" + }, + "TargetDatabase":{ + "shape":"DatabaseIdentifier", + "documentation":"

A DatabaseIdentifier structure that describes a target database for resource linking.

" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the database resides.

" } }, "documentation":"

The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.

" }, + "DatabaseIdentifier":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the database resides.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database.

" + } + }, + "documentation":"

A structure that describes a target database for resource linking.

" + }, "DatabaseInput":{ "type":"structure", "required":["Name"], @@ -4247,6 +4573,10 @@ "CreateTableDefaultPermissions":{ "shape":"PrincipalPermissionsList", "documentation":"

Creates a set of default permissions on the table for principals.

" + }, + "TargetDatabase":{ + "shape":"DatabaseIdentifier", + "documentation":"

A DatabaseIdentifier structure that describes a target database for resource linking.

" } }, "documentation":"

The structure used to create or update a database.

" @@ -4256,6 +4586,76 @@ "member":{"shape":"Database"} }, "DatabaseName":{"type":"string"}, + "DateColumnStatisticsData":{ + "type":"structure", + "required":[ + "NumberOfNulls", + "NumberOfDistinctValues" + ], + "members":{ + "MinimumValue":{ + "shape":"Timestamp", + "documentation":"

Minimum value of the column.

" + }, + "MaximumValue":{ + "shape":"Timestamp", + "documentation":"

Maximum value of the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + }, + "NumberOfDistinctValues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of distinct values.

" + } + }, + "documentation":"

Defines column statistics data for a date column.

" + }, + "DecimalColumnStatisticsData":{ + "type":"structure", + "required":[ + "NumberOfNulls", + "NumberOfDistinctValues" + ], + "members":{ + "MinimumValue":{ + "shape":"DecimalNumber", + "documentation":"

Minimum value of the column.

" + }, + "MaximumValue":{ + "shape":"DecimalNumber", + "documentation":"

Maximum value of the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + }, + "NumberOfDistinctValues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of distinct values.

" + } + }, + "documentation":"

Defines column statistics data for a decimal column.

" + }, + "DecimalNumber":{ + "type":"structure", + "required":[ + "UnscaledValue", + "Scale" + ], + "members":{ + "UnscaledValue":{ + "shape":"Blob", + "documentation":"

The unscaled numeric value.

" + }, + "Scale":{ + "shape":"Integer", + "documentation":"

The scale that determines where the decimal point falls in the unscaled value.

" + } + }, + "documentation":"

Contains a numeric value in decimal format.

" + }, "DeleteBehavior":{ "type":"string", "enum":[ @@ -4279,6 +4679,73 @@ "members":{ } }, + "DeleteColumnStatisticsForPartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValues", + "ColumnName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

A list of partition values identifying the partition.

" + }, + "ColumnName":{ + "shape":"NameString", + "documentation":"

Name of the column.

" + } + } + }, + "DeleteColumnStatisticsForPartitionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteColumnStatisticsForTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "ColumnName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "ColumnName":{ + "shape":"NameString", + "documentation":"

The name of the column.

" + } + } + }, + "DeleteColumnStatisticsForTableResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteConnectionNameList":{ "type":"list", "member":{"shape":"NameString"}, @@ -4428,6 +4895,10 @@ "PolicyHashCondition":{ "shape":"HashString", "documentation":"

The hash value returned when this policy was set.

" + }, + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the AWS Glue resource for the resource policy to be deleted.

" } } }, @@ -4718,12 +5189,47 @@ "max":25, "min":1 }, + "Double":{"type":"double"}, + "DoubleColumnStatisticsData":{ + "type":"structure", + "required":[ + "NumberOfNulls", + "NumberOfDistinctValues" + ], + "members":{ + "MinimumValue":{ + "shape":"Double", + "documentation":"

Minimum value of the column.

" + }, + "MaximumValue":{ + "shape":"Double", + "documentation":"

Maximum value of the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + }, + "NumberOfDistinctValues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of distinct values.

" + } + }, + "documentation":"

Defines column statistics data for a double column.

" + }, "DynamoDBTarget":{ "type":"structure", "members":{ "Path":{ "shape":"Path", "documentation":"

The name of the DynamoDB table to crawl.

" + }, + "scanAll":{ + "shape":"NullableBoolean", + "documentation":"

Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.

A value of true means to scan all records, while a value of false means to sample the records. If no value is specified, the value defaults to true.

" + }, + "scanRate":{ + "shape":"NullableDouble", + "documentation":"

The percentage of the configured read capacity units to use by the AWS Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as a rate limiter for the number of reads that can be performed on that table per second.

The valid values are null or a value between 0.1 and 1.5. A null value is used when the user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).

" } }, "documentation":"

Specifies an Amazon DynamoDB table to crawl.

" @@ -4750,6 +5256,13 @@ "type":"list", "member":{"shape":"Edge"} }, + "EnableHybridValues":{ + "type":"string", + "enum":[ + "TRUE", + "FALSE" + ] + }, "EncryptionAtRest":{ "type":"structure", "required":["CatalogEncryptionMode"], @@ -4972,42 +5485,131 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

Name of the classifier to retrieve.

" + "documentation":"

Name of the classifier to retrieve.

" + } + } + }, + "GetClassifierResponse":{ + "type":"structure", + "members":{ + "Classifier":{ + "shape":"Classifier", + "documentation":"

The requested classifier.

" + } + } + }, + "GetClassifiersRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The size of the list to return (optional).

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

An optional continuation token.

" + } + } + }, + "GetClassifiersResponse":{ + "type":"structure", + "members":{ + "Classifiers":{ + "shape":"ClassifierList", + "documentation":"

The requested list of classifier objects.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token.

" + } + } + }, + "GetColumnNamesList":{ + "type":"list", + "member":{"shape":"NameString"}, + "max":100, + "min":0 + }, + "GetColumnStatisticsForPartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValues", + "ColumnNames" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

A list of partition values identifying the partition.

" + }, + "ColumnNames":{ + "shape":"GetColumnNamesList", + "documentation":"

A list of the column names.

" } } }, - "GetClassifierResponse":{ + "GetColumnStatisticsForPartitionResponse":{ "type":"structure", "members":{ - "Classifier":{ - "shape":"Classifier", - "documentation":"

The requested classifier.

" + "ColumnStatisticsList":{ + "shape":"ColumnStatisticsList", + "documentation":"

List of ColumnStatistics that were retrieved.

" + }, + "Errors":{ + "shape":"ColumnErrors", + "documentation":"

A list of errors that occurred while retrieving the column statistics data.

" } } }, - "GetClassifiersRequest":{ + "GetColumnStatisticsForTableRequest":{ "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "ColumnNames" + ], "members":{ - "MaxResults":{ - "shape":"PageSize", - "documentation":"

The size of the list to return (optional).

" + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" }, - "NextToken":{ - "shape":"Token", - "documentation":"

An optional continuation token.

" + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "ColumnNames":{ + "shape":"GetColumnNamesList", + "documentation":"

A list of the column names.

" } } }, - "GetClassifiersResponse":{ + "GetColumnStatisticsForTableResponse":{ "type":"structure", "members":{ - "Classifiers":{ - "shape":"ClassifierList", - "documentation":"

The requested list of classifier objects.

" + "ColumnStatisticsList":{ + "shape":"ColumnStatisticsList", + "documentation":"

List of ColumnStatistics that were retrieved.

" }, - "NextToken":{ - "shape":"Token", - "documentation":"

A continuation token.

" + "Errors":{ + "shape":"ColumnErrors", + "documentation":"

A list of errors for the column statistics that failed to be retrieved.

" } } }, @@ -5047,7 +5649,7 @@ }, "ConnectionType":{ "shape":"ConnectionType", - "documentation":"

The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.

" + "documentation":"

The type of connections to return. Currently, SFTP is not supported.

" } }, "documentation":"

Filters the connection definitions that are returned by the GetConnections API operation.

" @@ -5220,6 +5822,10 @@ "MaxResults":{ "shape":"PageSize", "documentation":"

The maximum number of databases to return in one response.

" + }, + "ResourceShareType":{ + "shape":"ResourceShareType", + "documentation":"

Allows you to specify that you want to list the databases shared with your account. The allowable values are FOREIGN or ALL.

  • If set to FOREIGN, will list the databases shared with your account.

  • If set to ALL, will list the databases shared with your account, as well as the databases in your local account.

" } } }, @@ -5811,9 +6417,43 @@ } } }, + "GetResourcePoliciesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if this is a continuation request.

" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"

The maximum size of a list to return.

" + } + } + }, + "GetResourcePoliciesResponse":{ + "type":"structure", + "members":{ + "GetResourcePoliciesResponseList":{ + "shape":"GetResourcePoliciesResponseList", + "documentation":"

A list of the individual resource policies and the account-level resource policy.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A continuation token, if the returned list does not contain the last resource policy available.

" + } + } + }, + "GetResourcePoliciesResponseList":{ + "type":"list", + "member":{"shape":"GluePolicy"} + }, "GetResourcePolicyRequest":{ "type":"structure", "members":{ + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the AWS Glue resource for the resource policy to be retrieved. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern.

" + } } }, "GetResourcePolicyResponse":{ @@ -6131,10 +6771,7 @@ }, "GetUserDefinedFunctionsRequest":{ "type":"structure", - "required":[ - "DatabaseName", - "Pattern" - ], + "required":["Pattern"], "members":{ "CatalogId":{ "shape":"CatalogIdString", @@ -6142,7 +6779,7 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the catalog database where the functions are located.

" + "documentation":"

The name of the catalog database where the functions are located. If none is provided, functions from all the databases across the catalog will be returned.

" }, "Pattern":{ "shape":"NameString", @@ -6296,6 +6933,28 @@ "documentation":"

An encryption operation failed.

", "exception":true }, + "GluePolicy":{ + "type":"structure", + "members":{ + "PolicyInJson":{ + "shape":"PolicyJsonString", + "documentation":"

Contains the requested policy document, in JSON format.

" + }, + "PolicyHash":{ + "shape":"HashString", + "documentation":"

Contains the hash value associated with this policy.

" + }, + "CreateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time at which the policy was created.

" + }, + "UpdateTime":{ + "shape":"Timestamp", + "documentation":"

The date and time at which the policy was last updated.

" + } + }, + "documentation":"

A structure for returning a resource policy.

" + }, "GlueResourceArn":{ "type":"string", "max":10240, @@ -6370,11 +7029,11 @@ }, "GrokPattern":{ "shape":"GrokPattern", - "documentation":"

The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers.

" + "documentation":"

The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers.

" }, "CustomPatterns":{ "shape":"CustomPatterns", - "documentation":"

Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers.

" + "documentation":"

Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers.

" } }, "documentation":"

A classifier that uses grok patterns.

" @@ -6408,6 +7067,17 @@ "documentation":"

The same unique identifier was associated with two different records.

", "exception":true }, + "IllegalWorkflowStateException":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"MessageString", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

The workflow is in an invalid state to perform a requested operation.

", + "exception":true + }, "ImportCatalogToGlueRequest":{ "type":"structure", "members":{ @@ -6479,7 +7149,7 @@ }, "Exclusions":{ "shape":"PathList", - "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" + "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" } }, "documentation":"

Specifies a JDBC data store to crawl.

" @@ -6705,7 +7375,7 @@ }, "JobRunState":{ "shape":"JobRunState", - "documentation":"

The current state of the job run.

" + "documentation":"

The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses.

" }, "Arguments":{ "shape":"GenericMap", @@ -6881,7 +7551,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

A classifier for JSON content.

" @@ -7194,6 +7864,33 @@ "type":"string", "enum":["EQUALS"] }, + "Long":{"type":"long"}, + "LongColumnStatisticsData":{ + "type":"structure", + "required":[ + "NumberOfNulls", + "NumberOfDistinctValues" + ], + "members":{ + "MinimumValue":{ + "shape":"Long", + "documentation":"

Minimum value of the column.

" + }, + "MaximumValue":{ + "shape":"Long", + "documentation":"

Maximum value of the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + }, + "NumberOfDistinctValues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of distinct values.

" + } + }, + "documentation":"

Defines column statistics data for a long column.

" + }, "MLTransform":{ "type":"structure", "members":{ @@ -7411,6 +8108,10 @@ "type":"integer", "min":0 }, + "NonNegativeLong":{ + "type":"long", + "min":0 + }, "NotificationProperty":{ "type":"structure", "members":{ @@ -7525,6 +8226,10 @@ "LastAnalyzedTime":{ "shape":"Timestamp", "documentation":"

The last time at which column statistics were computed for this partition.

" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the partition resides.

" } }, "documentation":"

Represents a slice of table data.

" @@ -7754,6 +8459,10 @@ "shape":"PolicyJsonString", "documentation":"

Contains the policy document to set, in JSON format.

" }, + "ResourceArn":{ + "shape":"GlueResourceArn", + "documentation":"

The ARN of the AWS Glue resource for the resource policy to be set. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern.

" + }, "PolicyHashCondition":{ "shape":"HashString", "documentation":"

The hash value returned when the previous policy was set using PutResourcePolicy. Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.

" @@ -7761,6 +8470,10 @@ "PolicyExistsCondition":{ "shape":"ExistCondition", "documentation":"

A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.

" + }, + "EnableHybrid":{ + "shape":"EnableHybridValues", + "documentation":"

Allows you to specify if you want to use both resource-level and account/catalog-level resource policies. A resource-level policy is a policy attached to an individual resource such as a database or a table.

The default value of NO indicates that resource-level policies cannot co-exist with an account-level policy. A value of YES means the use of both resource-level and account/catalog-level resource policies is allowed.

" } } }, @@ -7844,6 +8557,13 @@ "documentation":"

A resource numerical limit was exceeded.

", "exception":true }, + "ResourceShareType":{ + "type":"string", + "enum":[ + "FOREIGN", + "ALL" + ] + }, "ResourceType":{ "type":"string", "enum":[ @@ -7915,7 +8635,7 @@ }, "Exclusions":{ "shape":"PathList", - "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" + "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" } }, "documentation":"

Specifies a data store in Amazon Simple Storage Service (Amazon S3).

" @@ -7930,7 +8650,7 @@ "members":{ "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "State":{ "shape":"ScheduleState", @@ -8040,6 +8760,10 @@ "MaxResults":{ "shape":"PageSize", "documentation":"

The maximum number of tables to return in a single response.

" + }, + "ResourceShareType":{ + "shape":"ResourceShareType", + "documentation":"

Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN or ALL.

  • If set to FOREIGN, will search the tables shared with your account.

  • If set to ALL, will search the tables shared with your account, as well as the tables in your local account.

" } } }, @@ -8447,6 +9171,28 @@ } } }, + "StopWorkflowRunRequest":{ + "type":"structure", + "required":[ + "Name", + "RunId" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"

The name of the workflow to stop.

" + }, + "RunId":{ + "shape":"IdString", + "documentation":"

The ID of the workflow run to stop.

" + } + } + }, + "StopWorkflowRunResponse":{ + "type":"structure", + "members":{ + } + }, "StorageDescriptor":{ "type":"structure", "members":{ @@ -8501,6 +9247,34 @@ }, "documentation":"

Describes the physical storage of table data.

" }, + "StringColumnStatisticsData":{ + "type":"structure", + "required":[ + "MaximumLength", + "AverageLength", + "NumberOfNulls", + "NumberOfDistinctValues" + ], + "members":{ + "MaximumLength":{ + "shape":"NonNegativeLong", + "documentation":"

The maximum length of the string values in the column.

" + }, + "AverageLength":{ + "shape":"NonNegativeDouble", + "documentation":"

The average length of the string values in the column.

" + }, + "NumberOfNulls":{ + "shape":"NonNegativeLong", + "documentation":"

Number of nulls.

" + }, + "NumberOfDistinctValues":{ + "shape":"NonNegativeLong", + "documentation":"

Number of distinct values.

" + } + }, + "documentation":"

Defines column statistics data for a string column.

" + }, "StringList":{ "type":"list", "member":{"shape":"GenericString"} @@ -8576,6 +9350,14 @@ "IsRegisteredWithLakeFormation":{ "shape":"Boolean", "documentation":"

Indicates whether the table has been registered with AWS Lake Formation.

" + }, + "TargetTable":{ + "shape":"TableIdentifier", + "documentation":"

A TableIdentifier structure that describes a target table for resource linking.

" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the table resides.

" } }, "documentation":"

Represents a collection of related data organized in columns and rows.

" @@ -8598,6 +9380,24 @@ "type":"list", "member":{"shape":"TableError"} }, + "TableIdentifier":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the table resides.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database that contains the target table.

" + }, + "Name":{ + "shape":"NameString", + "documentation":"

The name of the target table.

" + } + }, + "documentation":"

A structure that describes a target table for resource linking.

" + }, "TableInput":{ "type":"structure", "required":["Name"], @@ -8649,6 +9449,10 @@ "Parameters":{ "shape":"ParametersMap", "documentation":"

These key-value pairs define properties associated with the table.

" + }, + "TargetTable":{ + "shape":"TableIdentifier", + "documentation":"

A TableIdentifier structure that describes a target table for resource linking.

" } }, "documentation":"

A structure used to define a table.

" @@ -9124,6 +9928,12 @@ }, "documentation":"

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.

" }, + "TypeString":{ + "type":"string", + "max":20000, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "URI":{ "type":"string", "max":1024, @@ -9185,6 +9995,87 @@ "members":{ } }, + "UpdateColumnStatisticsForPartitionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "PartitionValues", + "ColumnStatisticsList" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "PartitionValues":{ + "shape":"ValueStringList", + "documentation":"

A list of partition values identifying the partition.

" + }, + "ColumnStatisticsList":{ + "shape":"UpdateColumnStatisticsList", + "documentation":"

A list of the column statistics.

" + } + } + }, + "UpdateColumnStatisticsForPartitionResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"ColumnStatisticsErrors", + "documentation":"

An error occurred while updating the column statistics data.

" + } + } + }, + "UpdateColumnStatisticsForTableRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "ColumnStatisticsList" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database where the partitions reside.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the partitions' table.

" + }, + "ColumnStatisticsList":{ + "shape":"UpdateColumnStatisticsList", + "documentation":"

A list of the column statistics.

" + } + } + }, + "UpdateColumnStatisticsForTableResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"ColumnStatisticsErrors", + "documentation":"

List of ColumnStatisticsErrors.

" + } + } + }, + "UpdateColumnStatisticsList":{ + "type":"list", + "member":{"shape":"ColumnStatistics"}, + "max":25, + "min":0 + }, "UpdateConnectionRequest":{ "type":"structure", "required":[ @@ -9237,7 +10128,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" }, "Classifiers":{ "shape":"ClassifierNameList", @@ -9253,7 +10144,7 @@ }, "Configuration":{ "shape":"CrawlerConfiguration", - "documentation":"

The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" + "documentation":"

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" }, "CrawlerSecurityConfiguration":{ "shape":"CrawlerSecurityConfiguration", @@ -9276,7 +10167,7 @@ }, "Schedule":{ "shape":"CronExpression", - "documentation":"

The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *).

" + "documentation":"

The updated cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

" } } }, @@ -9448,7 +10339,7 @@ }, "JsonPath":{ "shape":"JsonPath", - "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" + "documentation":"

A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in Writing JsonPath Custom Classifiers.

" } }, "documentation":"

Specifies a JSON classifier to be updated.

" @@ -9689,6 +10580,10 @@ "shape":"NameString", "documentation":"

The name of the function.

" }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the catalog database that contains the function.

" + }, "ClassName":{ "shape":"NameString", "documentation":"

The Java class that contains the function code.

" @@ -9708,6 +10603,10 @@ "ResourceUris":{ "shape":"ResourceUriList", "documentation":"

The resource URIs for the function.

" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the function resides.

" } }, "documentation":"

Represents the equivalent of a Hive user-defined function (UDF) definition.

" @@ -9922,7 +10821,9 @@ "type":"string", "enum":[ "RUNNING", - "COMPLETED" + "COMPLETED", + "STOPPING", + "STOPPED" ] }, "WorkflowRuns":{ diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 1dbcf3a93125..4b8fb169350d 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index fe93b2ec7917..a008e5996151 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index ab51f01e3aa2..fb277f0e1ba3 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/paginators-1.json b/services/guardduty/src/main/resources/codegen-resources/paginators-1.json index 717e540366dd..83ef33cce131 100644 --- a/services/guardduty/src/main/resources/codegen-resources/paginators-1.json +++ b/services/guardduty/src/main/resources/codegen-resources/paginators-1.json @@ -36,6 +36,12 @@ "limit_key": "MaxResults", "result_key": "Members" }, + "ListOrganizationAdminAccounts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AdminAccounts" + }, "ListPublishingDestinations": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index a84dd0cda552..df746d23d768 100644 --- 
a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -40,7 +40,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Archives GuardDuty findings specified by the list of finding IDs.

Only the master account can archive findings. Member accounts do not have permission to archive findings from their accounts.

" + "documentation":"

Archives GuardDuty findings that are specified by the list of finding IDs.

Only the master account can archive findings. Member accounts don't have permission to archive findings from their accounts.

" }, "CreateDetector":{ "name":"CreateDetector", @@ -55,7 +55,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each region that you enable the service. You can have only one detector per account per region.

" + "documentation":"

Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each Region where you enable the service. You can have only one detector per account per Region.

" }, "CreateFilter":{ "name":"CreateFilter", @@ -85,7 +85,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Creates a new IPSet, called Trusted IP list in the consoler user interface. An IPSet is a list IP addresses trusted for secure communication with AWS infrastructure and applications. GuardDuty does not generate findings for IP addresses included in IPSets. Only users from the master account can use this operation.

" + "documentation":"

Creates a new IPSet, which is called a trusted IP list in the console user interface. An IPSet is a list of IP addresses that are trusted for secure communication with AWS infrastructure and applications. GuardDuty doesn't generate findings for IP addresses that are included in IPSets. Only users from the master account can use this operation.

" }, "CreateMembers":{ "name":"CreateMembers", @@ -115,7 +115,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Creates a publishing destination to send findings to. The resource to send findings to must exist before you use this operation.

" + "documentation":"

Creates a publishing destination to export findings to. The resource to export findings to must exist before you use this operation.

" }, "CreateSampleFindings":{ "name":"CreateSampleFindings", @@ -145,7 +145,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets. Only users of the master account can use this operation.

" + "documentation":"

Creates a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets. Only users of the master account can use this operation.

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -160,7 +160,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Declines invitations sent to the current member account by AWS account specified by their account IDs.

" + "documentation":"

Declines invitations sent to the current member account by AWS accounts specified by their account IDs.

" }, "DeleteDetector":{ "name":"DeleteDetector", @@ -175,7 +175,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes a Amazon GuardDuty detector specified by the detector ID.

" + "documentation":"

Deletes an Amazon GuardDuty detector that is specified by the detector ID.

" }, "DeleteFilter":{ "name":"DeleteFilter", @@ -205,7 +205,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes the IPSet specified by the ipSetId. IPSets are called Trusted IP lists in the console user interface.

" + "documentation":"

Deletes the IPSet specified by the ipSetId. IPSets are called trusted IP lists in the console user interface.

" }, "DeleteInvitations":{ "name":"DeleteInvitations", @@ -265,7 +265,22 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes ThreatIntelSet specified by the ThreatIntelSet ID.

" + "documentation":"

Deletes the ThreatIntelSet specified by the ThreatIntelSet ID.

" + }, + "DescribeOrganizationConfiguration":{ + "name":"DescribeOrganizationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/detector/{detectorId}/admin", + "responseCode":200 + }, + "input":{"shape":"DescribeOrganizationConfigurationRequest"}, + "output":{"shape":"DescribeOrganizationConfigurationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Returns information about the account selected as the delegated administrator for GuardDuty.

" }, "DescribePublishingDestination":{ "name":"DescribePublishingDestination", @@ -282,6 +297,21 @@ ], "documentation":"

Returns information about the publishing destination specified by the provided destinationId.

" }, + "DisableOrganizationAdminAccount":{ + "name":"DisableOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/admin/disable", + "responseCode":200 + }, + "input":{"shape":"DisableOrganizationAdminAccountRequest"}, + "output":{"shape":"DisableOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Disables an AWS account within the Organization as the GuardDuty delegated administrator.

" + }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", "http":{ @@ -312,6 +342,21 @@ ], "documentation":"

Disassociates GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.

" }, + "EnableOrganizationAdminAccount":{ + "name":"EnableOrganizationAdminAccount", + "http":{ + "method":"POST", + "requestUri":"/admin/enable", + "responseCode":200 + }, + "input":{"shape":"EnableOrganizationAdminAccountRequest"}, + "output":{"shape":"EnableOrganizationAdminAccountResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Enables an AWS account within the organization as the GuardDuty delegated administrator.

" + }, "GetDetector":{ "name":"GetDetector", "http":{ @@ -370,7 +415,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists Amazon GuardDuty findings' statistics for the specified detector ID.

" + "documentation":"

Lists Amazon GuardDuty findings statistics for the specified detector ID.

" }, "GetIPSet":{ "name":"GetIPSet", @@ -460,7 +505,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Invites other AWS accounts (created as members of the current AWS account by CreateMembers) to enable GuardDuty and allow the current AWS account to view and manage these accounts' GuardDuty findings on their behalf as the master account.

" + "documentation":"

Invites other AWS accounts (created as members of the current AWS account by CreateMembers) to enable GuardDuty, and allow the current AWS account to view and manage these accounts' GuardDuty findings on their behalf as the master account.

" }, "ListDetectors":{ "name":"ListDetectors", @@ -552,6 +597,21 @@ ], "documentation":"

Lists details about all member accounts for the current GuardDuty master account.

" }, + "ListOrganizationAdminAccounts":{ + "name":"ListOrganizationAdminAccounts", + "http":{ + "method":"GET", + "requestUri":"/admin", + "responseCode":200 + }, + "input":{"shape":"ListOrganizationAdminAccountsRequest"}, + "output":{"shape":"ListOrganizationAdminAccountsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Lists the accounts configured as GuardDuty delegated administrators.

" + }, "ListPublishingDestinations":{ "name":"ListPublishingDestinations", "http":{ @@ -580,7 +640,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource..

" + "documentation":"

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and threat intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

" }, "ListThreatIntelSets":{ "name":"ListThreatIntelSets", @@ -625,7 +685,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Stops GuardDuty monitoring for the specified member accounnts. Use the StartMonitoringMembers to restart monitoring for those accounts.

" + "documentation":"

Stops GuardDuty monitoring for the specified member accounts. Use the StartMonitoringMembers operation to restart monitoring for those accounts.

" }, "TagResource":{ "name":"TagResource", @@ -732,6 +792,21 @@ ], "documentation":"

Updates the IPSet specified by the IPSet ID.

" }, + "UpdateOrganizationConfiguration":{ + "name":"UpdateOrganizationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/detector/{detectorId}/admin", + "responseCode":200 + }, + "input":{"shape":"UpdateOrganizationConfigurationRequest"}, + "output":{"shape":"UpdateOrganizationConfigurationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ], + "documentation":"

Updates the delegated administrator account with the values provided.

" + }, "UpdatePublishingDestination":{ "name":"UpdatePublishingDestination", "http":{ @@ -760,7 +835,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Updates the ThreatIntelSet specified by ThreatIntelSet ID.

" + "documentation":"

Updates the ThreatIntelSet specified by the ThreatIntelSet ID.

" } }, "shapes":{ @@ -785,7 +860,7 @@ }, "InvitationId":{ "shape":"String", - "documentation":"

This value is used to validate the master account to the member account.

", + "documentation":"

The value that is used to validate the master account to the member account.

", "locationName":"invitationId" } } @@ -795,12 +870,28 @@ "members":{ } }, + "AccessControlList":{ + "type":"structure", + "members":{ + "AllowsPublicReadAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public read access for the bucket is enabled through an Access Control List (ACL).

", + "locationName":"allowsPublicReadAccess" + }, + "AllowsPublicWriteAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public write access for the bucket is enabled through an Access Control List (ACL).

", + "locationName":"allowsPublicWriteAccess" + } + }, + "documentation":"

Contains information on the current access control policies for the bucket.

" + }, "AccessKeyDetails":{ "type":"structure", "members":{ "AccessKeyId":{ "shape":"String", - "documentation":"

Access key ID of the user.

", + "documentation":"

The access key ID of the user.

", "locationName":"accessKeyId" }, "PrincipalId":{ @@ -830,12 +921,12 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Member account ID.

", + "documentation":"

The member account ID.

", "locationName":"accountId" }, "Email":{ "shape":"Email", - "documentation":"

Member account's email address.

", + "documentation":"

The email address of the member account.

", "locationName":"email" } }, @@ -858,12 +949,23 @@ "max":50, "min":1 }, + "AccountLevelPermissions":{ + "type":"structure", + "members":{ + "BlockPublicAccess":{ + "shape":"BlockPublicAccess", + "documentation":"

Describes the S3 Block Public Access settings of the bucket's parent account.

", + "locationName":"blockPublicAccess" + } + }, + "documentation":"

Contains information about the account level permissions on the S3 bucket.

" + }, "Action":{ "type":"structure", "members":{ "ActionType":{ "shape":"String", - "documentation":"

GuardDuty Finding activity type.

", + "documentation":"

The GuardDuty finding activity type.

", "locationName":"actionType" }, "AwsApiCallAction":{ @@ -887,7 +989,38 @@ "locationName":"portProbeAction" } }, - "documentation":"

Contains information about action.

" + "documentation":"

Contains information about actions.

" + }, + "AdminAccount":{ + "type":"structure", + "members":{ + "AdminAccountId":{ + "shape":"String", + "documentation":"

The AWS account ID for the account.

", + "locationName":"adminAccountId" + }, + "AdminStatus":{ + "shape":"AdminStatus", + "documentation":"

Indicates whether the account is enabled as the delegated administrator.

", + "locationName":"adminStatus" + } + }, + "documentation":"

The account within the organization specified as the GuardDuty delegated administrator.

" + }, + "AdminAccounts":{ + "type":"list", + "member":{"shape":"AdminAccount"}, + "max":1, + "min":0 + }, + "AdminStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLE_IN_PROGRESS" + ], + "max":300, + "min":1 }, "ArchiveFindingsRequest":{ "type":"structure", @@ -904,7 +1037,7 @@ }, "FindingIds":{ "shape":"FindingIds", - "documentation":"

IDs of the findings that you want to archive.

", + "documentation":"

The IDs of the findings that you want to archive.

", "locationName":"findingIds" } } @@ -919,27 +1052,27 @@ "members":{ "Api":{ "shape":"String", - "documentation":"

AWS API name.

", + "documentation":"

The AWS API name.

", "locationName":"api" }, "CallerType":{ "shape":"String", - "documentation":"

AWS API caller type.

", + "documentation":"

The AWS API caller type.

", "locationName":"callerType" }, "DomainDetails":{ "shape":"DomainDetails", - "documentation":"

Domain information for the AWS API call.

", + "documentation":"

The domain information for the AWS API call.

", "locationName":"domainDetails" }, "RemoteIpDetails":{ "shape":"RemoteIpDetails", - "documentation":"

Remote IP information of the connection.

", + "documentation":"

The remote IP information of the connection.

", "locationName":"remoteIpDetails" }, "ServiceName":{ "shape":"String", - "documentation":"

AWS service name whose API was invoked.

", + "documentation":"

The AWS service name whose API was invoked.

", "locationName":"serviceName" } }, @@ -959,17 +1092,80 @@ "locationName":"__type" } }, - "documentation":"

Bad request exception object.

", + "documentation":"

A bad request exception object.

", "error":{"httpStatusCode":400}, "exception":true }, + "BlockPublicAccess":{ + "type":"structure", + "members":{ + "IgnorePublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to IgnorePublicAcls.

", + "locationName":"ignorePublicAcls" + }, + "RestrictPublicBuckets":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to RestrictPublicBuckets.

", + "locationName":"restrictPublicBuckets" + }, + "BlockPublicAcls":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to BlockPublicAcls.

", + "locationName":"blockPublicAcls" + }, + "BlockPublicPolicy":{ + "shape":"Boolean", + "documentation":"

Indicates if S3 Block Public Access is set to BlockPublicPolicy.

", + "locationName":"blockPublicPolicy" + } + }, + "documentation":"

Contains information on how the bucket owner's S3 Block Public Access settings are being applied to the S3 bucket. See S3 Block Public Access for more information.

" + }, "Boolean":{"type":"boolean"}, + "BucketLevelPermissions":{ + "type":"structure", + "members":{ + "AccessControlList":{ + "shape":"AccessControlList", + "documentation":"

Contains information on how Access Control Policies are applied to the bucket.

", + "locationName":"accessControlList" + }, + "BucketPolicy":{ + "shape":"BucketPolicy", + "documentation":"

Contains information on the bucket policies for the S3 bucket.

", + "locationName":"bucketPolicy" + }, + "BlockPublicAccess":{ + "shape":"BlockPublicAccess", + "documentation":"

Contains information on which account level S3 Block Public Access settings are applied to the S3 bucket.

", + "locationName":"blockPublicAccess" + } + }, + "documentation":"

Contains information about the bucket level permissions for the S3 bucket.

" + }, + "BucketPolicy":{ + "type":"structure", + "members":{ + "AllowsPublicReadAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public read access for the bucket is enabled through a bucket policy.

", + "locationName":"allowsPublicReadAccess" + }, + "AllowsPublicWriteAccess":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether public write access for the bucket is enabled through a bucket policy.

", + "locationName":"allowsPublicWriteAccess" + } + }, + "documentation":"

Contains information on the current bucket policies for the S3 bucket.

" + }, "City":{ "type":"structure", "members":{ "CityName":{ "shape":"String", - "documentation":"

City name of the remote IP address.

", + "documentation":"

The city name of the remote IP address.

", "locationName":"cityName" } }, @@ -985,68 +1181,68 @@ "members":{ "Eq":{ "shape":"Eq", - "documentation":"

Represents the equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents the equal condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"eq" }, "Neq":{ "shape":"Neq", - "documentation":"

Represents the not equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents the not equal condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"neq" }, "Gt":{ "shape":"Integer", - "documentation":"

Represents a greater than condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a greater than condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"gt" }, "Gte":{ "shape":"Integer", - "documentation":"

Represents a greater than equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a greater than or equal condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"gte" }, "Lt":{ "shape":"Integer", - "documentation":"

Represents a less than condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a less than condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"lt" }, "Lte":{ "shape":"Integer", - "documentation":"

Represents a less than equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a less than or equal condition to be applied to a single field when querying for findings.

", "deprecated":true, "locationName":"lte" }, "Equals":{ "shape":"Equals", - "documentation":"

Represents an equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents an equal condition to be applied to a single field when querying for findings.

", "locationName":"equals" }, "NotEquals":{ "shape":"NotEquals", - "documentation":"

Represents an not equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a not equal condition to be applied to a single field when querying for findings.

", "locationName":"notEquals" }, "GreaterThan":{ "shape":"Long", - "documentation":"

Represents a greater than condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a greater than condition to be applied to a single field when querying for findings.

", "locationName":"greaterThan" }, "GreaterThanOrEqual":{ "shape":"Long", - "documentation":"

Represents a greater than equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a greater than or equal condition to be applied to a single field when querying for findings.

", "locationName":"greaterThanOrEqual" }, "LessThan":{ "shape":"Long", - "documentation":"

Represents a less than condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a less than condition to be applied to a single field when querying for findings.

", "locationName":"lessThan" }, "LessThanOrEqual":{ "shape":"Long", - "documentation":"

Represents a less than equal condition to be applied to a single field when querying for findings.

", + "documentation":"

Represents a less than or equal condition to be applied to a single field when querying for findings.

", "locationName":"lessThanOrEqual" } }, @@ -1062,16 +1258,16 @@ "members":{ "CountryCode":{ "shape":"String", - "documentation":"

Country code of the remote IP address.

", + "documentation":"

The country code of the remote IP address.

", "locationName":"countryCode" }, "CountryName":{ "shape":"String", - "documentation":"

Country name of the remote IP address.

", + "documentation":"

The country name of the remote IP address.

", "locationName":"countryName" } }, - "documentation":"

Contains information about the country in which the remote IP address is located.

" + "documentation":"

Contains information about the country where the remote IP address is located.

" }, "CreateDetectorRequest":{ "type":"structure", @@ -1079,7 +1275,7 @@ "members":{ "Enable":{ "shape":"Boolean", - "documentation":"

A boolean value that specifies whether the detector is to be enabled.

", + "documentation":"

A Boolean value that specifies whether the detector is to be enabled.

", "locationName":"enable" }, "ClientToken":{ @@ -1090,7 +1286,7 @@ }, "FindingPublishingFrequency":{ "shape":"FindingPublishingFrequency", - "documentation":"

A enum value that specifies how frequently customer got Finding updates published.

", + "documentation":"

An enum value that specifies how frequently updated findings are exported.

", "locationName":"findingPublishingFrequency" }, "Tags":{ @@ -1120,7 +1316,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account for which you want to create a filter.

", + "documentation":"

The unique ID of the detector of the GuardDuty account that you want to create a filter for.

", "location":"uri", "locationName":"detectorId" }, @@ -1146,7 +1342,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria to be used in the filter for querying findings.

", + "documentation":"

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

  • accountId

  • region

  • confidence

  • id

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.outpostArn

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.resourceType

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.additionalInfo.threatListName

  • service.archived

    When this attribute is set to TRUE, only archived findings are listed. When it's set to FALSE, only unarchived findings are listed. When this attribute is not set, all existing findings are listed.

  • service.resourceRole

  • severity

  • type

  • updatedAt

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.

", "locationName":"findingCriteria" }, "ClientToken":{ @@ -1185,13 +1381,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.

", + "documentation":"

The unique ID of the detector of the GuardDuty account that you want to create an IPSet for.

", "location":"uri", "locationName":"detectorId" }, "Name":{ "shape":"Name", - "documentation":"

The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.

", + "documentation":"

The user-friendly name to identify the IPSet.

Allowed characters are alphanumerics, spaces, hyphens (-), and underscores (_).

", "locationName":"name" }, "Format":{ @@ -1201,12 +1397,12 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key)

", + "documentation":"

The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ "shape":"Boolean", - "documentation":"

A boolean value that indicates whether GuardDuty is to start using the uploaded IPSet.

", + "documentation":"

A Boolean value that indicates whether GuardDuty is to start using the uploaded IPSet.

", "locationName":"activate" }, "ClientToken":{ @@ -1242,7 +1438,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account with which you want to associate member accounts.

", + "documentation":"

The unique ID of the detector of the GuardDuty account that you want to associate member accounts with.

", "location":"uri", "locationName":"detectorId" }, @@ -1259,7 +1455,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that include the accountIds of the unprocessed accounts and a result string that explains why each was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -1280,12 +1476,12 @@ }, "DestinationType":{ "shape":"DestinationType", - "documentation":"

The type of resource for the publishing destination. Currently only S3 is supported.

", + "documentation":"

The type of resource for the publishing destination. Currently only Amazon S3 buckets are supported.

", "locationName":"destinationType" }, "DestinationProperties":{ "shape":"DestinationProperties", - "documentation":"

Properties of the publishing destination, including the ARNs for the destination and the KMS key used for encryption.

", + "documentation":"

The properties of the publishing destination, including the ARNs for the destination and the KMS key used for encryption.

", "locationName":"destinationProperties" }, "ClientToken":{ @@ -1302,7 +1498,7 @@ "members":{ "DestinationId":{ "shape":"String", - "documentation":"

The ID of the publishing destination created.

", + "documentation":"

The ID of the publishing destination that is created.

", "locationName":"destinationId" } } @@ -1319,7 +1515,7 @@ }, "FindingTypes":{ "shape":"FindingTypes", - "documentation":"

Types of sample findings to generate.

", + "documentation":"

The types of sample findings to generate.

", "locationName":"findingTypes" } } @@ -1341,13 +1537,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account for which you want to create a threatIntelSet.

", + "documentation":"

The unique ID of the detector of the GuardDuty account that you want to create a threatIntelSet for.

", "location":"uri", "locationName":"detectorId" }, "Name":{ "shape":"Name", - "documentation":"

A user-friendly ThreatIntelSet name that is displayed in all finding generated by activity that involves IP addresses included in this ThreatIntelSet.

", + "documentation":"

A user-friendly ThreatIntelSet name displayed in all findings that are generated by activity that involves IP addresses included in this ThreatIntelSet.

", "locationName":"name" }, "Format":{ @@ -1357,12 +1553,12 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).

", + "documentation":"

The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ "shape":"Boolean", - "documentation":"

A boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet.

", + "documentation":"

A Boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet.

", "locationName":"activate" }, "ClientToken":{ @@ -1373,7 +1569,7 @@ }, "Tags":{ "shape":"TagMap", - "documentation":"

The tags to be added to a new Threat List resource.

", + "documentation":"

The tags to be added to a new threat list resource.

", "locationName":"tags" } } @@ -1411,11 +1607,27 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } }, + "DefaultServerSideEncryption":{ + "type":"structure", + "members":{ + "EncryptionType":{ + "shape":"String", + "documentation":"

The type of encryption used for objects within the S3 bucket.

", + "locationName":"encryptionType" + }, + "KmsMasterKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS encryption key. Only available if the bucket EncryptionType is aws:kms.

", + "locationName":"kmsMasterKeyArn" + } + }, + "documentation":"

Contains information on the server-side encryption method used in the S3 bucket. See S3 Server-Side Encryption for more information.

" + }, "DeleteDetectorRequest":{ "type":"structure", "required":["DetectorId"], @@ -1442,13 +1654,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the filter is associated with.

", + "documentation":"

The unique ID of the detector that the filter is associated with.

", "location":"uri", "locationName":"detectorId" }, "FilterName":{ "shape":"String", - "documentation":"

The name of the filter you want to delete.

", + "documentation":"

The name of the filter that you want to delete.

", "location":"uri", "locationName":"filterName" } @@ -1502,7 +1714,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -1573,13 +1785,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the threatIntelSet is associated with.

", + "documentation":"

The unique ID of the detector that the threatIntelSet is associated with.

", "location":"uri", "locationName":"detectorId" }, "ThreatIntelSetId":{ "shape":"String", - "documentation":"

The unique ID of the threatIntelSet you want to delete.

", + "documentation":"

The unique ID of the threatIntelSet that you want to delete.

", "location":"uri", "locationName":"threatIntelSetId" } @@ -1590,6 +1802,37 @@ "members":{ } }, + "DescribeOrganizationConfigurationRequest":{ + "type":"structure", + "required":["DetectorId"], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "documentation":"

The ID of the detector to retrieve information about the delegated administrator from.

", + "location":"uri", + "locationName":"detectorId" + } + } + }, + "DescribeOrganizationConfigurationResponse":{ + "type":"structure", + "required":[ + "AutoEnable", + "MemberAccountLimitReached" + ], + "members":{ + "AutoEnable":{ + "shape":"Boolean", + "documentation":"

Indicates whether GuardDuty is automatically enabled for accounts added to the organization.

", + "locationName":"autoEnable" + }, + "MemberAccountLimitReached":{ + "shape":"Boolean", + "documentation":"

Indicates whether the maximum number of allowed member accounts is already associated with the delegated administrator master account.

", + "locationName":"memberAccountLimitReached" + } + } + }, "DescribePublishingDestinationRequest":{ "type":"structure", "required":[ @@ -1628,7 +1871,7 @@ }, "DestinationType":{ "shape":"DestinationType", - "documentation":"

The type of the publishing destination. Currently, only S3 is supported.

", + "documentation":"

The type of publishing destination. Currently, only Amazon S3 buckets are supported.

", "locationName":"destinationType" }, "Status":{ @@ -1663,7 +1906,7 @@ }, "DestinationType":{ "shape":"DestinationType", - "documentation":"

The type of resource used for the publishing destination. Currently, only S3 is supported.

", + "documentation":"

The type of resource used for the publishing destination. Currently, only Amazon S3 buckets are supported.

", "locationName":"destinationType" }, "Status":{ @@ -1672,7 +1915,7 @@ "locationName":"status" } }, - "documentation":"

Contains information about a publishing destination, including the ID, type, and status.

" + "documentation":"

Contains information about the publishing destination, including the ID, type, and status.

" }, "DestinationProperties":{ "type":"structure", @@ -1688,7 +1931,7 @@ "locationName":"kmsKeyArn" } }, - "documentation":"

Contains the ARN of the resource to publish to, such as an S3 bucket, and the ARN of the KMS key to use to encrypt published findings.

" + "documentation":"

Contains the Amazon Resource Name (ARN) of the resource to publish to, such as an S3 bucket, and the ARN of the KMS key to use to encrypt published findings.

" }, "DestinationType":{ "type":"string", @@ -1720,6 +1963,22 @@ "max":300, "min":1 }, + "DisableOrganizationAdminAccountRequest":{ + "type":"structure", + "required":["AdminAccountId"], + "members":{ + "AdminAccountId":{ + "shape":"String", + "documentation":"

The AWS Account ID for the organization account to be disabled as a GuardDuty delegated administrator.

", + "locationName":"adminAccountId" + } + } + }, + "DisableOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + } + }, "DisassociateFromMasterAccountRequest":{ "type":"structure", "required":["DetectorId"], @@ -1746,13 +2005,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account whose members you want to disassociate from master.

", + "documentation":"

The unique ID of the detector of the GuardDuty account whose members you want to disassociate from the master account.

", "location":"uri", "locationName":"detectorId" }, "AccountIds":{ "shape":"AccountIds", - "documentation":"

A list of account IDs of the GuardDuty member accounts that you want to disassociate from master.

", + "documentation":"

A list of account IDs of the GuardDuty member accounts that you want to disassociate from the master account.

", "locationName":"accountIds" } } @@ -1763,7 +2022,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -1773,7 +2032,7 @@ "members":{ "Domain":{ "shape":"String", - "documentation":"

Domain information for the API request.

", + "documentation":"

The domain information for the API request.

", "locationName":"domain" } }, @@ -1784,7 +2043,7 @@ "members":{ "Domain":{ "shape":"String", - "documentation":"

Domain information for the AWS API call.

", + "documentation":"

The domain information for the AWS API call.

", "locationName":"domain" } }, @@ -1796,6 +2055,22 @@ "max":64, "min":1 }, + "EnableOrganizationAdminAccountRequest":{ + "type":"structure", + "required":["AdminAccountId"], + "members":{ + "AdminAccountId":{ + "shape":"String", + "documentation":"

The AWS Account ID for the organization account to be enabled as a GuardDuty delegated administrator.

", + "locationName":"adminAccountId" + } + } + }, + "EnableOrganizationAdminAccountResponse":{ + "type":"structure", + "members":{ + } + }, "Eq":{ "type":"list", "member":{"shape":"String"} @@ -1874,7 +2149,7 @@ }, "Arn":{ "shape":"String", - "documentation":"

The ARN for the finding.

", + "documentation":"

The ARN of the finding.

", "locationName":"arn" }, "Confidence":{ @@ -1884,7 +2159,7 @@ }, "CreatedAt":{ "shape":"String", - "documentation":"

The time and date at which the finding was created.

", + "documentation":"

The time and date when the finding was created.

", "locationName":"createdAt" }, "Description":{ @@ -1904,7 +2179,7 @@ }, "Region":{ "shape":"String", - "documentation":"

The Region in which the finding was generated.

", + "documentation":"

The Region where the finding was generated.

", "locationName":"region" }, "Resource":{ @@ -1927,17 +2202,17 @@ }, "Title":{ "shape":"String", - "documentation":"

The title for the finding.

", + "documentation":"

The title of the finding.

", "locationName":"title" }, "Type":{ "shape":"FindingType", - "documentation":"

The type of the finding.

", + "documentation":"

The type of finding.

", "locationName":"type" }, "UpdatedAt":{ "shape":"String", - "documentation":"

The time and date at which the finding was laste updated.

", + "documentation":"

The time and date when the finding was last updated.

", "locationName":"updatedAt" } }, @@ -1988,7 +2263,7 @@ "members":{ "CountBySeverity":{ "shape":"CountBySeverity", - "documentation":"

Represents a map of severity to count statistic for a set of findings

", + "documentation":"

Represents a map of severity to count statistics for a set of findings.

", "locationName":"countBySeverity" } }, @@ -2016,12 +2291,12 @@ "members":{ "Lat":{ "shape":"Double", - "documentation":"

Latitude information of remote IP address.

", + "documentation":"

The latitude information of the remote IP address.

", "locationName":"lat" }, "Lon":{ "shape":"Double", - "documentation":"

Longitude information of remote IP address.

", + "documentation":"

The longitude information of the remote IP address.

", "locationName":"lon" } }, @@ -2048,12 +2323,12 @@ "members":{ "CreatedAt":{ "shape":"String", - "documentation":"

Detector creation timestamp.

", + "documentation":"

The timestamp of when the detector was created.

", "locationName":"createdAt" }, "FindingPublishingFrequency":{ "shape":"FindingPublishingFrequency", - "documentation":"

Finding publishing frequency.

", + "documentation":"

The publishing frequency of the finding.

", "locationName":"findingPublishingFrequency" }, "ServiceRole":{ @@ -2068,7 +2343,7 @@ }, "UpdatedAt":{ "shape":"String", - "documentation":"

Detector last update timestamp.

", + "documentation":"

The last-updated timestamp for the detector.

", "locationName":"updatedAt" }, "Tags":{ @@ -2087,7 +2362,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the filter is associated with.

", + "documentation":"

The unique ID of the detector that the filter is associated with.

", "location":"uri", "locationName":"detectorId" }, @@ -2154,7 +2429,7 @@ }, "FindingIds":{ "shape":"FindingIds", - "documentation":"

IDs of the findings that you want to retrieve.

", + "documentation":"

The IDs of the findings that you want to retrieve.

", "locationName":"findingIds" }, "SortCriteria":{ @@ -2190,12 +2465,12 @@ }, "FindingStatisticTypes":{ "shape":"FindingStatisticTypes", - "documentation":"

Types of finding statistics to retrieve.

", + "documentation":"

The types of finding statistics to retrieve.

", "locationName":"findingStatisticTypes" }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria used for querying findings.

", + "documentation":"

Represents the criteria that is used for querying findings.

", "locationName":"findingCriteria" } } @@ -2206,7 +2481,7 @@ "members":{ "FindingStatistics":{ "shape":"FindingStatistics", - "documentation":"

Finding statistics object.

", + "documentation":"

The finding statistics object.

", "locationName":"findingStatistics" } } @@ -2220,7 +2495,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the ipSet is associated with.

", + "documentation":"

The unique ID of the detector that the IPSet is associated with.

", "location":"uri", "locationName":"detectorId" }, @@ -2243,7 +2518,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The user friendly name for the IPSet.

", + "documentation":"

The user-friendly name for the IPSet.

", "locationName":"name" }, "Format":{ @@ -2253,17 +2528,17 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key)

", + "documentation":"

The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Status":{ "shape":"IpSetStatus", - "documentation":"

The status of ipSet file uploaded.

", + "documentation":"

The status of the IPSet file that was uploaded.

", "locationName":"status" }, "Tags":{ "shape":"TagMap", - "documentation":"

The tags of the IP set resource.

", + "documentation":"

The tags of the IPSet resource.

", "locationName":"tags" } } @@ -2301,7 +2576,7 @@ "members":{ "Master":{ "shape":"Master", - "documentation":"

Master account details.

", + "documentation":"

The master account details.

", "locationName":"master" } } @@ -2340,7 +2615,7 @@ }, "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -2354,13 +2629,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the threatIntelSet is associated with.

", + "documentation":"

The unique ID of the detector that the threatIntelSet is associated with.

", "location":"uri", "locationName":"detectorId" }, "ThreatIntelSetId":{ "shape":"String", - "documentation":"

The unique ID of the threatIntelSet you want to get.

", + "documentation":"

The unique ID of the threatIntelSet that you want to get.

", "location":"uri", "locationName":"threatIntelSetId" } @@ -2377,7 +2652,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

A user-friendly ThreatIntelSet name that is displayed in all finding generated by activity that involves IP addresses included in this ThreatIntelSet.

", + "documentation":"

A user-friendly ThreatIntelSet name displayed in all findings that are generated by activity that involves IP addresses included in this ThreatIntelSet.

", "locationName":"name" }, "Format":{ @@ -2387,7 +2662,7 @@ }, "Location":{ "shape":"Location", - "documentation":"

The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).

", + "documentation":"

The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Status":{ @@ -2397,7 +2672,7 @@ }, "Tags":{ "shape":"TagMap", - "documentation":"

The tags of the Threat List resource.

", + "documentation":"

The tags of the threat list resource.

", "locationName":"tags" } } @@ -2411,12 +2686,12 @@ "members":{ "Arn":{ "shape":"String", - "documentation":"

AWS EC2 instance profile ARN.

", + "documentation":"

The profile ARN of the EC2 instance.

", "locationName":"arn" }, "Id":{ "shape":"String", - "documentation":"

AWS EC2 instance profile ID.

", + "documentation":"

The profile ID of the EC2 instance.

", "locationName":"id" } }, @@ -2427,7 +2702,7 @@ "members":{ "AvailabilityZone":{ "shape":"String", - "documentation":"

The availability zone of the EC2 instance.

", + "documentation":"

The Availability Zone of the EC2 instance.

", "locationName":"availabilityZone" }, "IamInstanceProfile":{ @@ -2472,7 +2747,7 @@ }, "NetworkInterfaces":{ "shape":"NetworkInterfaces", - "documentation":"

The network interface information of the EC2 instance.

", + "documentation":"

The elastic network interface information of the EC2 instance.

", "locationName":"networkInterfaces" }, "Platform":{ @@ -2508,7 +2783,7 @@ "locationName":"__type" } }, - "documentation":"

Internal server error exception object.

", + "documentation":"

An internal server error exception object.

", "error":{"httpStatusCode":500}, "exception":true }, @@ -2517,7 +2792,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The ID of the account from which the invitations was sent.

", + "documentation":"

The ID of the account that the invitation was sent from.

", "locationName":"accountId" }, "InvitationId":{ @@ -2532,7 +2807,7 @@ }, "InvitedAt":{ "shape":"String", - "documentation":"

Timestamp at which the invitation was sent.

", + "documentation":"

The timestamp when the invitation was sent.

", "locationName":"invitedAt" } }, @@ -2553,7 +2828,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account with which you want to invite members.

", + "documentation":"

The unique ID of the detector of the GuardDuty account that you want to invite members with.

", "location":"uri", "locationName":"detectorId" }, @@ -2564,7 +2839,7 @@ }, "DisableEmailNotification":{ "shape":"Boolean", - "documentation":"

A boolean value that specifies whether you want to disable email notification to the accounts that you’re inviting to GuardDuty as members.

", + "documentation":"

A Boolean value that specifies whether you want to disable email notification to the accounts that you’re inviting to GuardDuty as members.

", "locationName":"disableEmailNotification" }, "Message":{ @@ -2580,7 +2855,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -2627,13 +2902,13 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", + "documentation":"

You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 50. The maximum value is 50.

", "location":"querystring", "locationName":"maxResults" }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" } @@ -2645,12 +2920,12 @@ "members":{ "DetectorIds":{ "shape":"DetectorIds", - "documentation":"

A list of detector Ids.

", + "documentation":"

A list of detector IDs.

", "locationName":"detectorIds" }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2661,19 +2936,19 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the filter is associated with.

", + "documentation":"

The unique ID of the detector that the filter is associated with.

", "location":"uri", "locationName":"detectorId" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", + "documentation":"

You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 50. The maximum value is 50.

", "location":"querystring", "locationName":"maxResults" }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" } @@ -2685,12 +2960,12 @@ "members":{ "FilterNames":{ "shape":"FilterNames", - "documentation":"

A list of filter names

", + "documentation":"

A list of filter names.

", "locationName":"filterNames" }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2707,7 +2982,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria used for querying findings. Valid values include:

  • JSON field name

  • accountId

  • region

  • confidence

  • id

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.outpostArn

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.resourceType

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.additionalInfo.threatListName

  • service.archived

    When this attribute is set to 'true', only archived findings are listed. When it's set to 'false', only unarchived findings are listed. When this attribute is not set, all existing findings are listed.

  • service.resourceRole

  • severity

  • type

  • updatedAt

    Type: Timestamp in Unix Epoch millisecond format: 1486685375000

", + "documentation":"

Represents the criteria used for querying findings. Valid values include:

  • JSON field name

  • accountId

  • region

  • confidence

  • id

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.resourceType

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.additionalInfo.threatListName

  • service.archived

    When this attribute is set to 'true', only archived findings are listed. When it's set to 'false', only unarchived findings are listed. When this attribute is not set, all existing findings are listed.

  • service.resourceRole

  • severity

  • type

  • updatedAt

    Type: Timestamp in Unix Epoch millisecond format: 1486685375000

", "locationName":"findingCriteria" }, "SortCriteria":{ @@ -2722,7 +2997,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "locationName":"nextToken" } } @@ -2733,12 +3008,12 @@ "members":{ "FindingIds":{ "shape":"FindingIds", - "documentation":"

The IDs of the findings you are listing.

", + "documentation":"

The IDs of the findings that you're listing.

", "locationName":"findingIds" }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2749,7 +3024,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the ipSet is associated with.

", + "documentation":"

The unique ID of the detector that the IPSet is associated with.

", "location":"uri", "locationName":"detectorId" }, @@ -2761,7 +3036,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" } @@ -2778,7 +3053,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2788,13 +3063,13 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", + "documentation":"

You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 50. The maximum value is 50.

", "location":"querystring", "locationName":"maxResults" }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" } @@ -2810,7 +3085,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2833,13 +3108,13 @@ }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" }, "OnlyAssociated":{ "shape":"String", - "documentation":"

Specifies whether to only return associated members or to return all members (including members which haven't been invited yet or have been disassociated).

", + "documentation":"

Specifies whether to only return associated members or to return all members (including members who haven't been invited yet or have been disassociated).

", "location":"querystring", "locationName":"onlyAssociated" } @@ -2855,7 +3130,39 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", + "locationName":"nextToken" + } + } + }, + "ListOrganizationAdminAccountsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "documentation":"

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListOrganizationAdminAccountsResponse":{ + "type":"structure", + "members":{ + "AdminAccounts":{ + "shape":"AdminAccounts", + "documentation":"

An AdminAccounts object that includes a list of accounts configured as GuardDuty delegated administrators.

", + "locationName":"adminAccounts" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2878,7 +3185,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

A token to use for paginating results returned in the repsonse. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", + "documentation":"

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", "location":"querystring", "locationName":"nextToken" } @@ -2890,12 +3197,12 @@ "members":{ "Destinations":{ "shape":"Destinations", - "documentation":"

A Destinations obect that includes information about each publishing destination returned.

", + "documentation":"

A Destinations object that includes information about each publishing destination returned.

", "locationName":"destinations" }, "NextToken":{ "shape":"String", - "documentation":"

A token to use for paginating results returned in the repsonse. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", + "documentation":"

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", "locationName":"nextToken" } } @@ -2906,7 +3213,7 @@ "members":{ "ResourceArn":{ "shape":"GuardDutyArn", - "documentation":"

The Amazon Resource Name (ARN) for the given GuardDuty resource

", + "documentation":"

The Amazon Resource Name (ARN) for the given GuardDuty resource.

", "location":"uri", "locationName":"resourceArn" } @@ -2928,19 +3235,19 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector the threatIntelSet is associated with.

", + "documentation":"

The unique ID of the detector that the threatIntelSet is associated with.

", "location":"uri", "locationName":"detectorId" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", + "documentation":"

You can use this parameter to indicate the maximum number of items that you want in the response. The default value is 50. The maximum value is 50.

", "location":"querystring", "locationName":"maxResults" }, "NextToken":{ "shape":"String", - "documentation":"

You can use this parameter to paginate results in the response. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "documentation":"

You can use this parameter to paginate results in the response. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "location":"querystring", "locationName":"nextToken" } @@ -2957,7 +3264,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

Pagination parameter to be used on the next list operation to retrieve more items.

", + "documentation":"

The pagination parameter to be used on the next list operation to retrieve more items.

", "locationName":"nextToken" } } @@ -2967,7 +3274,7 @@ "members":{ "IpAddressV4":{ "shape":"String", - "documentation":"

IPV4 remote address of the connection.

", + "documentation":"

The IPv4 local address of the connection.

", "locationName":"ipAddressV4" } }, @@ -2978,12 +3285,12 @@ "members":{ "Port":{ "shape":"Integer", - "documentation":"

Port number of the local connection.

", + "documentation":"

The port number of the local connection.

", "locationName":"port" }, "PortName":{ "shape":"String", - "documentation":"

Port name of the local connection.

", + "documentation":"

The port name of the local connection.

", "locationName":"portName" } }, @@ -3000,12 +3307,12 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The ID of the account used as the Master account.

", + "documentation":"

The ID of the account used as the master account.

", "locationName":"accountId" }, "InvitationId":{ "shape":"String", - "documentation":"

This value is used to validate the master account to the member account.

", + "documentation":"

The value used to validate the master account to the member account.

", "locationName":"invitationId" }, "RelationshipStatus":{ @@ -3015,11 +3322,11 @@ }, "InvitedAt":{ "shape":"String", - "documentation":"

Timestamp at which the invitation was sent.

", + "documentation":"

The timestamp when the invitation was sent.

", "locationName":"invitedAt" } }, - "documentation":"

Contains information about the Master account and invitation.

" + "documentation":"

Contains information about the master account and invitation.

" }, "MaxResults":{ "type":"integer", @@ -3038,22 +3345,22 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Member account ID.

", + "documentation":"

The ID of the member account.

", "locationName":"accountId" }, "DetectorId":{ "shape":"DetectorId", - "documentation":"

Member account's detector ID.

", + "documentation":"

The detector ID of the member account.

", "locationName":"detectorId" }, "MasterId":{ "shape":"String", - "documentation":"

Master account ID.

", + "documentation":"

The master account ID.

", "locationName":"masterId" }, "Email":{ "shape":"Email", - "documentation":"

Member account's email address.

", + "documentation":"

The email address of the member account.

", "locationName":"email" }, "RelationshipStatus":{ @@ -3063,16 +3370,16 @@ }, "InvitedAt":{ "shape":"String", - "documentation":"

Timestamp at which the invitation was sent

", + "documentation":"

The timestamp when the invitation was sent.

", "locationName":"invitedAt" }, "UpdatedAt":{ "shape":"String", - "documentation":"

Member last updated timestamp.

", + "documentation":"

The last-updated timestamp of the member.

", "locationName":"updatedAt" } }, - "documentation":"

Continas information about the member account

" + "documentation":"

Contains information about the member account.

" }, "Members":{ "type":"list", @@ -3094,37 +3401,37 @@ "members":{ "Blocked":{ "shape":"Boolean", - "documentation":"

Network connection blocked information.

", + "documentation":"

Indicates whether EC2 blocked the network connection to your instance.

", "locationName":"blocked" }, "ConnectionDirection":{ "shape":"String", - "documentation":"

Network connection direction.

", + "documentation":"

The network connection direction.

", "locationName":"connectionDirection" }, "LocalPortDetails":{ "shape":"LocalPortDetails", - "documentation":"

Local port information of the connection.

", + "documentation":"

The local port information of the connection.

", "locationName":"localPortDetails" }, "Protocol":{ "shape":"String", - "documentation":"

Network connection protocol.

", + "documentation":"

The network connection protocol.

", "locationName":"protocol" }, "LocalIpDetails":{ "shape":"LocalIpDetails", - "documentation":"

Local IP information of the connection.

", + "documentation":"

The local IP information of the connection.

", "locationName":"localIpDetails" }, "RemoteIpDetails":{ "shape":"RemoteIpDetails", - "documentation":"

Remote IP information of the connection.

", + "documentation":"

The remote IP information of the connection.

", "locationName":"remoteIpDetails" }, "RemotePortDetails":{ "shape":"RemotePortDetails", - "documentation":"

Remote port information of the connection.

", + "documentation":"

The remote port information of the connection.

", "locationName":"remotePortDetails" } }, @@ -3135,22 +3442,22 @@ "members":{ "Ipv6Addresses":{ "shape":"Ipv6Addresses", - "documentation":"

A list of EC2 instance IPv6 address information.

", + "documentation":"

A list of IPv6 addresses for the EC2 instance.

", "locationName":"ipv6Addresses" }, "NetworkInterfaceId":{ "shape":"String", - "documentation":"

The ID of the network interface

", + "documentation":"

The ID of the network interface.

", "locationName":"networkInterfaceId" }, "PrivateDnsName":{ "shape":"String", - "documentation":"

Private DNS name of the EC2 instance.

", + "documentation":"

The private DNS name of the EC2 instance.

", "locationName":"privateDnsName" }, "PrivateIpAddress":{ "shape":"String", - "documentation":"

Private IP address of the EC2 instance.

", + "documentation":"

The private IP address of the EC2 instance.

", "locationName":"privateIpAddress" }, "PrivateIpAddresses":{ @@ -3160,17 +3467,17 @@ }, "PublicDnsName":{ "shape":"String", - "documentation":"

Public DNS name of the EC2 instance.

", + "documentation":"

The public DNS name of the EC2 instance.

", "locationName":"publicDnsName" }, "PublicIp":{ "shape":"String", - "documentation":"

Public IP address of the EC2 instance.

", + "documentation":"

The public IP address of the EC2 instance.

", "locationName":"publicIp" }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

Security groups associated with the EC2 instance.

", + "documentation":"

The security groups associated with the EC2 instance.

", "locationName":"securityGroups" }, "SubnetId":{ @@ -3184,7 +3491,7 @@ "locationName":"vpcId" } }, - "documentation":"

Contains information about the network interface of the Ec2 instance.

" + "documentation":"

Contains information about the elastic network interface of the EC2 instance.

" }, "NetworkInterfaces":{ "type":"list", @@ -3206,38 +3513,65 @@ "members":{ "Asn":{ "shape":"String", - "documentation":"

Autonomous system number of the internet provider of the remote IP address.

", + "documentation":"

The Autonomous System Number (ASN) of the internet provider of the remote IP address.

", "locationName":"asn" }, "AsnOrg":{ "shape":"String", - "documentation":"

Organization that registered this ASN.

", + "documentation":"

The organization that registered this ASN.

", "locationName":"asnOrg" }, "Isp":{ "shape":"String", - "documentation":"

ISP information for the internet provider.

", + "documentation":"

The ISP information for the internet provider.

", "locationName":"isp" }, "Org":{ "shape":"String", - "documentation":"

Name of the internet provider.

", + "documentation":"

The name of the internet provider.

", "locationName":"org" } }, - "documentation":"

Continas information about the ISP organization of the remote IP address.

" + "documentation":"

Contains information about the ISP organization of the remote IP address.

" + }, + "Owner":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"String", + "documentation":"

The canonical user ID of the bucket owner. For information about locating your canonical user ID, see Finding Your Account Canonical User ID.

", + "locationName":"id" + } + }, + "documentation":"

Contains information on the owner of the bucket.

" + }, + "PermissionConfiguration":{ + "type":"structure", + "members":{ + "BucketLevelPermissions":{ + "shape":"BucketLevelPermissions", + "documentation":"

Contains information about the bucket level permissions for the S3 bucket.

", + "locationName":"bucketLevelPermissions" + }, + "AccountLevelPermissions":{ + "shape":"AccountLevelPermissions", + "documentation":"

Contains information about the account level permissions on the S3 bucket.

", + "locationName":"accountLevelPermissions" + } + }, + "documentation":"

Contains information about how permissions are configured for the S3 bucket.

" }, "PortProbeAction":{ "type":"structure", "members":{ "Blocked":{ "shape":"Boolean", - "documentation":"

Port probe blocked information.

", + "documentation":"

Indicates whether EC2 blocked the port probe to the instance, such as with an ACL.

", "locationName":"blocked" }, "PortProbeDetails":{ "shape":"PortProbeDetails", - "documentation":"

A list of port probe details objects.

", + "documentation":"

A list of objects related to port probe details.

", "locationName":"portProbeDetails" } }, @@ -3248,17 +3582,17 @@ "members":{ "LocalPortDetails":{ "shape":"LocalPortDetails", - "documentation":"

Local port information of the connection.

", + "documentation":"

The local port information of the connection.

", "locationName":"localPortDetails" }, "LocalIpDetails":{ "shape":"LocalIpDetails", - "documentation":"

Local IP information of the connection.

", + "documentation":"

The local IP information of the connection.

", "locationName":"localIpDetails" }, "RemoteIpDetails":{ "shape":"RemoteIpDetails", - "documentation":"

Remote IP information of the connection.

", + "documentation":"

The remote IP information of the connection.

", "locationName":"remoteIpDetails" } }, @@ -3273,12 +3607,12 @@ "members":{ "PrivateDnsName":{ "shape":"String", - "documentation":"

Private DNS name of the EC2 instance.

", + "documentation":"

The private DNS name of the EC2 instance.

", "locationName":"privateDnsName" }, "PrivateIpAddress":{ "shape":"String", - "documentation":"

Private IP address of the EC2 instance.

", + "documentation":"

The private IP address of the EC2 instance.

", "locationName":"privateIpAddress" } }, @@ -3293,21 +3627,37 @@ "members":{ "Code":{ "shape":"String", - "documentation":"

Product code information.

", + "documentation":"

The product code information.

", "locationName":"code" }, "ProductType":{ "shape":"String", - "documentation":"

Product code type.

", + "documentation":"

The product code type.

", "locationName":"productType" } }, - "documentation":"

Contains information about the product code for the Ec2 instance.

" + "documentation":"

Contains information about the product code for the EC2 instance.

" }, "ProductCodes":{ "type":"list", "member":{"shape":"ProductCode"} }, + "PublicAccess":{ + "type":"structure", + "members":{ + "PermissionConfiguration":{ + "shape":"PermissionConfiguration", + "documentation":"

Contains information about how permissions are configured for the S3 bucket.

", + "locationName":"permissionConfiguration" + }, + "EffectivePermission":{ + "shape":"String", + "documentation":"

Describes the effective permission on this bucket after factoring in all attached policies.

", + "locationName":"effectivePermission" + } + }, + "documentation":"

Describes the public access policies that apply to the S3 bucket.

" + }, "PublishingStatus":{ "type":"string", "enum":[ @@ -3324,43 +3674,43 @@ "members":{ "City":{ "shape":"City", - "documentation":"

City information of the remote IP address.

", + "documentation":"

The city information of the remote IP address.

", "locationName":"city" }, "Country":{ "shape":"Country", - "documentation":"

Country code of the remote IP address.

", + "documentation":"

The country code of the remote IP address.

", "locationName":"country" }, "GeoLocation":{ "shape":"GeoLocation", - "documentation":"

Location information of the remote IP address.

", + "documentation":"

The location information of the remote IP address.

", "locationName":"geoLocation" }, "IpAddressV4":{ "shape":"String", - "documentation":"

IPV4 remote address of the connection.

", + "documentation":"

The IPv4 remote address of the connection.

", "locationName":"ipAddressV4" }, "Organization":{ "shape":"Organization", - "documentation":"

ISP Organization information of the remote IP address.

", + "documentation":"

The ISP organization information of the remote IP address.

", "locationName":"organization" } }, - "documentation":"

Continas information about the remote IP address of the connection.

" + "documentation":"

Contains information about the remote IP address of the connection.

" }, "RemotePortDetails":{ "type":"structure", "members":{ "Port":{ "shape":"Integer", - "documentation":"

Port number of the remote connection.

", + "documentation":"

The port number of the remote connection.

", "locationName":"port" }, "PortName":{ "shape":"String", - "documentation":"

Port name of the remote connection.

", + "documentation":"

The port name of the remote connection.

", "locationName":"portName" } }, @@ -3374,6 +3724,11 @@ "documentation":"

The IAM access key details (IAM user information) of a user that engaged in the activity that prompted GuardDuty to generate a finding.

", "locationName":"accessKeyDetails" }, + "S3BucketDetails":{ + "shape":"S3BucketDetails", + "documentation":"

Contains information on the S3 bucket.

", + "locationName":"s3BucketDetails" + }, "InstanceDetails":{ "shape":"InstanceDetails", "documentation":"

The information about the EC2 instance associated with the activity that prompted GuardDuty to generate a finding.

", @@ -3381,23 +3736,73 @@ }, "ResourceType":{ "shape":"String", - "documentation":"

The type of the AWS resource.

", + "documentation":"

The type of AWS resource.

", "locationName":"resourceType" } }, "documentation":"

Contains information about the AWS resource associated with the activity that prompted GuardDuty to generate a finding.

" }, + "S3BucketDetail":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the S3 bucket.

", + "locationName":"arn" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the S3 bucket.

", + "locationName":"name" + }, + "Type":{ + "shape":"String", + "documentation":"

Describes whether the bucket is a source or destination bucket.

", + "locationName":"type" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time when the bucket was created.

", + "locationName":"createdAt" + }, + "Owner":{ + "shape":"Owner", + "documentation":"

The owner of the S3 bucket.

", + "locationName":"owner" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

All tags attached to the S3 bucket.

", + "locationName":"tags" + }, + "DefaultServerSideEncryption":{ + "shape":"DefaultServerSideEncryption", + "documentation":"

Describes the server-side encryption method used in the S3 bucket.

", + "locationName":"defaultServerSideEncryption" + }, + "PublicAccess":{ + "shape":"PublicAccess", + "documentation":"

Describes the public access policies that apply to the S3 bucket.

", + "locationName":"publicAccess" + } + } + }, + "S3BucketDetails":{ + "type":"list", + "member":{"shape":"S3BucketDetail"}, + "documentation":"

Contains information on the S3 bucket.

" + }, "SecurityGroup":{ "type":"structure", "members":{ "GroupId":{ "shape":"String", - "documentation":"

EC2 instance's security group ID.

", + "documentation":"

The security group ID of the EC2 instance.

", "locationName":"groupId" }, "GroupName":{ "shape":"String", - "documentation":"

EC2 instance's security group name.

", + "documentation":"

The security group name of the EC2 instance.

", "locationName":"groupName" } }, @@ -3412,7 +3817,7 @@ "members":{ "Action":{ "shape":"Action", - "documentation":"

Information about the activity described in a finding.

", + "documentation":"

Information about the activity that is described in a finding.

", "locationName":"action" }, "Evidence":{ @@ -3427,27 +3832,27 @@ }, "Count":{ "shape":"Integer", - "documentation":"

Total count of the occurrences of this finding type.

", + "documentation":"

The total count of the occurrences of this finding type.

", "locationName":"count" }, "DetectorId":{ "shape":"DetectorId", - "documentation":"

Detector ID for the GuardDuty service.

", + "documentation":"

The detector ID for the GuardDuty service.

", "locationName":"detectorId" }, "EventFirstSeen":{ "shape":"String", - "documentation":"

First seen timestamp of the activity that prompted GuardDuty to generate this finding.

", + "documentation":"

The first-seen timestamp of the activity that prompted GuardDuty to generate this finding.

", "locationName":"eventFirstSeen" }, "EventLastSeen":{ "shape":"String", - "documentation":"

Last seen timestamp of the activity that prompted GuardDuty to generate this finding.

", + "documentation":"

The last-seen timestamp of the activity that prompted GuardDuty to generate this finding.

", "locationName":"eventLastSeen" }, "ResourceRole":{ "shape":"String", - "documentation":"

Resource role information for this finding.

", + "documentation":"

The resource role information for this finding.

", "locationName":"resourceRole" }, "ServiceName":{ @@ -3457,7 +3862,7 @@ }, "UserFeedback":{ "shape":"String", - "documentation":"

Feedback left about the finding.

", + "documentation":"

Feedback that was submitted about the finding.

", "locationName":"userFeedback" } }, @@ -3468,12 +3873,12 @@ "members":{ "AttributeName":{ "shape":"String", - "documentation":"

Represents the finding attribute (for example, accountId) by which to sort findings.

", + "documentation":"

Represents the finding attribute (for example, accountId) to sort findings by.

", "locationName":"attributeName" }, "OrderBy":{ "shape":"OrderBy", - "documentation":"

Order by which the sorted findings are to be displayed.

", + "documentation":"

The order by which the sorted findings are to be displayed.

", "locationName":"orderBy" } }, @@ -3505,7 +3910,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain the unprocessed account and a result string that explains why it was unprocessed.

", "locationName":"unprocessedAccounts" } } @@ -3519,13 +3924,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector of the GuardDuty account that you want to stop from monitor members' findings.

", + "documentation":"

The unique ID of the detector associated with the GuardDuty master account that is monitoring member accounts.

", "location":"uri", "locationName":"detectorId" }, "AccountIds":{ "shape":"AccountIds", - "documentation":"

A list of account IDs of the GuardDuty member accounts whose findings you want the master account to stop monitoring.

", + "documentation":"

A list of account IDs for the member accounts to stop monitoring.

", "locationName":"accountIds" } } @@ -3536,7 +3941,7 @@ "members":{ "UnprocessedAccounts":{ "shape":"UnprocessedAccounts", - "documentation":"

A list of objects containing the unprocessed account and a result string explaining why it was unprocessed.

", + "documentation":"

A list of objects that contain an accountId for each account that could not be processed, and a result string that indicates why the account was not processed.

", "locationName":"unprocessedAccounts" } } @@ -3547,16 +3952,16 @@ "members":{ "Key":{ "shape":"String", - "documentation":"

EC2 instance tag key.

", + "documentation":"

The EC2 instance tag key.

", "locationName":"key" }, "Value":{ "shape":"String", - "documentation":"

EC2 instance tag value.

", + "documentation":"

The EC2 instance tag value.

", "locationName":"value" } }, - "documentation":"

Contains information about a tag associated with the Ec2 instance.

" + "documentation":"

Contains information about a tag associated with the EC2 instance.

" }, "TagKey":{ "type":"string", @@ -3667,6 +4072,7 @@ "type":"list", "member":{"shape":"String"} }, + "Timestamp":{"type":"timestamp"}, "UnarchiveFindingsRequest":{ "type":"structure", "required":[ @@ -3682,7 +4088,7 @@ }, "FindingIds":{ "shape":"FindingIds", - "documentation":"

IDs of the findings to unarchive.

", + "documentation":"

The IDs of the findings to unarchive.

", "locationName":"findingIds" } } @@ -3701,7 +4107,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

AWS Account ID.

", + "documentation":"

The AWS account ID.

", "locationName":"accountId" }, "Result":{ @@ -3710,7 +4116,7 @@ "locationName":"result" } }, - "documentation":"

Contains information about the accounts that were not processed.

" + "documentation":"

Contains information about the accounts that weren't processed.

" }, "UnprocessedAccounts":{ "type":"list", @@ -3761,7 +4167,7 @@ }, "FindingPublishingFrequency":{ "shape":"FindingPublishingFrequency", - "documentation":"

A enum value that specifies how frequently findings are exported, such as to CloudWatch Events.

", + "documentation":"

An enum value that specifies how frequently findings are exported, such as to CloudWatch Events.

", "locationName":"findingPublishingFrequency" } } @@ -3839,7 +4245,7 @@ }, "FindingIds":{ "shape":"FindingIds", - "documentation":"

IDs of the findings that you want to mark as useful or not useful.

", + "documentation":"

The IDs of the findings that you want to mark as useful or not useful.

", "locationName":"findingIds" }, "Feedback":{ @@ -3885,12 +4291,12 @@ }, "Location":{ "shape":"Location", - "documentation":"

The updated URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key).

", + "documentation":"

The updated URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ "shape":"Boolean", - "documentation":"

The updated boolean value that specifies whether the IPSet is active or not.

", + "documentation":"

The updated Boolean value that specifies whether the IPSet is active or not.

", "locationName":"activate" } } @@ -3900,6 +4306,31 @@ "members":{ } }, + "UpdateOrganizationConfigurationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "AutoEnable" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "documentation":"

The ID of the detector to update the delegated administrator for.

", + "location":"uri", + "locationName":"detectorId" + }, + "AutoEnable":{ + "shape":"Boolean", + "documentation":"

Indicates whether to automatically enable member accounts in the organization.

", + "locationName":"autoEnable" + } + } + }, + "UpdateOrganizationConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdatePublishingDestinationRequest":{ "type":"structure", "required":[ @@ -3909,13 +4340,13 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The ID of the

", + "documentation":"

The ID of the detector associated with the publishing destinations to update.

", "location":"uri", "locationName":"detectorId" }, "DestinationId":{ "shape":"String", - "documentation":"

The ID of the detector associated with the publishing destinations to update.

", + "documentation":"

The ID of the publishing destination to update.

", "location":"uri", "locationName":"destinationId" }, @@ -3957,12 +4388,12 @@ }, "Location":{ "shape":"Location", - "documentation":"

The updated URI of the file that contains the ThreateIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key)

", + "documentation":"

The updated URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key.

", "locationName":"location" }, "Activate":{ "shape":"Boolean", - "documentation":"

The updated boolean value that specifies whether the ThreateIntelSet is active or not.

", + "documentation":"

The updated Boolean value that specifies whether the ThreatIntelSet is active or not.

", "locationName":"activate" } } @@ -3973,5 +4404,5 @@ } } }, - "documentation":"

Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IPs and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a region that has never been used, or unusual API calls, like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see Amazon GuardDuty User Guide.

" + "documentation":"

Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds (such as lists of malicious IPs and domains) and machine learning to identify unexpected, potentially unauthorized, and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances that serve malware or mine bitcoin.

GuardDuty also monitors AWS account access behavior for signs of compromise. Some examples of this are unauthorized infrastructure deployments such as EC2 instances deployed in a Region that has never been used, or unusual API calls like a password policy change to reduce password strength.

GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see the Amazon GuardDuty User Guide.

" } diff --git a/services/health/pom.xml b/services/health/pom.xml index f70223a6734e..b85f7c334c92 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/health/src/main/resources/codegen-resources/paginators-1.json b/services/health/src/main/resources/codegen-resources/paginators-1.json index e7e6c2ad8806..9881954c502e 100644 --- a/services/health/src/main/resources/codegen-resources/paginators-1.json +++ b/services/health/src/main/resources/codegen-resources/paginators-1.json @@ -3,6 +3,9 @@ "DescribeAffectedAccountsForOrganization": { "input_token": "nextToken", "limit_key": "maxResults", + "non_aggregate_keys": [ + "eventScopeCode" + ], "output_token": "nextToken", "result_key": "affectedAccounts" }, diff --git a/services/health/src/main/resources/codegen-resources/service-2.json b/services/health/src/main/resources/codegen-resources/service-2.json index 877c1145b80d..18a6ecc9b3ba 100644 --- a/services/health/src/main/resources/codegen-resources/service-2.json +++ b/services/health/src/main/resources/codegen-resources/service-2.json @@ -276,6 +276,7 @@ "shape":"affectedAccountsList", "documentation":"

A JSON set of elements of the affected accounts.

" }, + "eventScopeCode":{"shape":"eventScopeCode"}, "nextToken":{ "shape":"nextToken", "documentation":"

If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.

" @@ -689,16 +690,14 @@ "statusCode":{ "shape":"eventStatusCode", "documentation":"

The most recent status of the event. Possible values are open, closed, and upcoming.

" - } + }, + "eventScopeCode":{"shape":"eventScopeCode"} }, "documentation":"

Summary information about an AWS Health event.

" }, "EventAccountFilter":{ "type":"structure", - "required":[ - "eventArn", - "awsAccountId" - ], + "required":["eventArn"], "members":{ "eventArn":{ "shape":"eventArn", @@ -950,6 +949,7 @@ "shape":"eventTypeCategory", "documentation":"

The category of the event type.

" }, + "eventScopeCode":{"shape":"eventScopeCode"}, "region":{ "shape":"region", "documentation":"

The AWS Region name of the event.

" @@ -1131,8 +1131,8 @@ "entityUrl":{"type":"string"}, "entityValue":{ "type":"string", - "max":256, - "pattern":".{0,256}" + "max":1224, + "pattern":".{0,1224}" }, "entityValueList":{ "type":"list", @@ -1161,6 +1161,14 @@ "key":{"shape":"metadataKey"}, "value":{"shape":"metadataValue"} }, + "eventScopeCode":{ + "type":"string", + "enum":[ + "PUBLIC", + "ACCOUNT_SPECIFIC", + "NONE" + ] + }, "eventStatusCode":{ "type":"string", "enum":[ @@ -1222,10 +1230,13 @@ "max":100, "min":10 }, - "metadataKey":{"type":"string"}, + "metadataKey":{ + "type":"string", + "max":32766 + }, "metadataValue":{ "type":"string", - "max":10240 + "max":32766 }, "nextToken":{ "type":"string", diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml new file mode 100644 index 000000000000..122c890fc8a0 --- /dev/null +++ b/services/honeycode/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + honeycode + AWS Java SDK :: Services :: Honeycode + The AWS Java SDK for Honeycode module holds the client classes that are used for + communicating with Honeycode. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.honeycode + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/honeycode/src/main/resources/codegen-resources/paginators-1.json b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/honeycode/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/honeycode/src/main/resources/codegen-resources/service-2.json b/services/honeycode/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..4ec967eb53b3 --- /dev/null +++ b/services/honeycode/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,411 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-03-01", + "endpointPrefix":"honeycode", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Honeycode", + "serviceFullName":"Amazon Honeycode", + "serviceId":"Honeycode", + "signatureVersion":"v4", + "signingName":"honeycode", + "uid":"honeycode-2020-03-01" + }, + "operations":{ + "GetScreenData":{ + "name":"GetScreenData", + "http":{ + "method":"POST", + "requestUri":"/screendata" + }, + "input":{"shape":"GetScreenDataRequest"}, + "output":{"shape":"GetScreenDataResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

The GetScreenData API allows retrieval of data from a screen in a Honeycode app. The API allows setting local variables in the screen to filter, sort or otherwise affect what will be displayed on the screen.

" + }, + "InvokeScreenAutomation":{ + "name":"InvokeScreenAutomation", + "http":{ + "method":"POST", + "requestUri":"/workbooks/{workbookId}/apps/{appId}/screens/{screenId}/automations/{automationId}" + }, + "input":{"shape":"InvokeScreenAutomationRequest"}, + "output":{"shape":"InvokeScreenAutomationResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AutomationExecutionException"}, + {"shape":"AutomationExecutionTimeoutException"}, + {"shape":"RequestTimeoutException"} + ], + "documentation":"

The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app. The API allows setting local variables, which can then be used in the automation being invoked. This allows automating the Honeycode app interactions to write, update or delete data in the workbook.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the screen/automation in the request.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "AutomationExecutionException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The automation execution did not end successfully.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "AutomationExecutionTimeoutException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The automation execution timed out.

", + "error":{"httpStatusCode":504}, + "exception":true + }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":32 + }, + "ColumnMetadata":{ + "type":"structure", + "required":[ + "name", + "format" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the column.

" + }, + "format":{ + "shape":"Format", + "documentation":"

The format of the column.

" + } + }, + "documentation":"

Metadata for column in the table.

" + }, + "DataItem":{ + "type":"structure", + "members":{ + "overrideFormat":{ + "shape":"Format", + "documentation":"

The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table.

" + }, + "rawValue":{ + "shape":"RawValue", + "documentation":"

The raw value of the data. e.g. jsmith@example.com

" + }, + "formattedValue":{ + "shape":"FormattedValue", + "documentation":"

The formatted value of the data. e.g. John Smith.

" + } + }, + "documentation":"

The data in a particular data cell defined on the screen.

", + "sensitive":true + }, + "DataItems":{ + "type":"list", + "member":{"shape":"DataItem"} + }, + "ErrorMessage":{"type":"string"}, + "Format":{ + "type":"string", + "enum":[ + "AUTO", + "NUMBER", + "CURRENCY", + "DATE", + "TIME", + "DATE_TIME", + "PERCENTAGE", + "TEXT", + "ACCOUNTING", + "CONTACT", + "ROWLINK" + ] + }, + "FormattedValue":{"type":"string"}, + "GetScreenDataRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the screen.

" + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

The ID of the app that contains the screen.

" + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

The ID of the screen.

" + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100.

This parameter is optional. If you don't specify this parameter, the default page size is 100.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

This parameter is optional. If a nextToken is not specified, the API returns the first page of data.

Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException.

" + } + } + }, + "GetScreenDataResult":{ + "type":"structure", + "required":[ + "results", + "workbookCursor" + ], + "members":{ + "results":{ + "shape":"ResultSetMap", + "documentation":"

A map of all the rows on the screen keyed by block name.

" + }, + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor keeps increasing with every update and the increments are not sequential.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

There were unexpected errors from the server.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "InvokeScreenAutomationRequest":{ + "type":"structure", + "required":[ + "workbookId", + "appId", + "screenId", + "screenAutomationId" + ], + "members":{ + "workbookId":{ + "shape":"ResourceId", + "documentation":"

The ID of the workbook that contains the screen automation.

", + "location":"uri", + "locationName":"workbookId" + }, + "appId":{ + "shape":"ResourceId", + "documentation":"

The ID of the app that contains the screen automation.

", + "location":"uri", + "locationName":"appId" + }, + "screenId":{ + "shape":"ResourceId", + "documentation":"

The ID of the screen that contains the screen automation.

", + "location":"uri", + "locationName":"screenId" + }, + "screenAutomationId":{ + "shape":"ResourceId", + "documentation":"

The ID of the automation action to be performed.

", + "location":"uri", + "locationName":"automationId" + }, + "variables":{ + "shape":"VariableValueMap", + "documentation":"

Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen.

" + }, + "rowId":{ + "shape":"RowId", + "documentation":"

The row ID for the automation if the automation is defined inside a block with source or list.

" + }, + "clientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again.

Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days.

" + } + } + }, + "InvokeScreenAutomationResult":{ + "type":"structure", + "required":["workbookCursor"], + "members":{ + "workbookCursor":{ + "shape":"WorkbookCursor", + "documentation":"

The updated workbook cursor after performing the automation action.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "Name":{ + "type":"string", + "sensitive":true + }, + "PaginationToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "RawValue":{"type":"string"}, + "RequestTimeoutException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request timed out.

", + "error":{"httpStatusCode":504}, + "exception":true + }, + "ResourceId":{ + "type":"string", + "pattern":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

A Workbook, App, Screen or Screen Automation was not found with the given ID.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResultHeader":{ + "type":"list", + "member":{"shape":"ColumnMetadata"} + }, + "ResultRow":{ + "type":"structure", + "required":["dataItems"], + "members":{ + "rowId":{ + "shape":"RowId", + "documentation":"

The ID for a particular row.

" + }, + "dataItems":{ + "shape":"DataItems", + "documentation":"

List of all the data cells in a row.

" + } + }, + "documentation":"

A single row in the ResultSet.

" + }, + "ResultRows":{ + "type":"list", + "member":{"shape":"ResultRow"} + }, + "ResultSet":{ + "type":"structure", + "required":[ + "headers", + "rows" + ], + "members":{ + "headers":{ + "shape":"ResultHeader", + "documentation":"

List of headers for all the data cells in the block. The header identifies the name and default format of the data cell. Data cells appear in the same order in all rows as defined in the header. The names and formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank value is used.

For example, a task list that displays the task name, due date and assigned person might have headers [ { \"name\": \"Task Name\"}, {\"name\": \"Due Date\", \"format\": \"DATE\"}, {\"name\": \"Assigned\", \"format\": \"CONTACT\"} ]. Every row in the result will have the task name as the first item, due date as the second item and assigned person as the third item. If a particular task does not have a due date, that row will still have a blank value in the second element and the assigned person will still be in the third element.

" + }, + "rows":{ + "shape":"ResultRows", + "documentation":"

List of rows returned by the request. Each row has a row Id and a list of data cells in that row. The data cells will be present in the same order as they are defined in the header.

" + } + }, + "documentation":"

ResultSet contains the results of the request for a single block or list defined on the screen.

" + }, + "ResultSetMap":{ + "type":"map", + "key":{"shape":"Name"}, + "value":{"shape":"ResultSet"} + }, + "RowId":{ + "type":"string", + "pattern":"row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Remote service is unreachable.

", + "error":{"httpStatusCode":503}, + "exception":true + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The transactions per second (TPS) rate limit has been reached.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Request is invalid. The message in the response contains details on why the request is invalid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VariableName":{ + "type":"string", + "sensitive":true + }, + "VariableValue":{ + "type":"structure", + "required":["rawValue"], + "members":{ + "rawValue":{ + "shape":"RawValue", + "documentation":"

Raw value of the variable.

" + } + }, + "documentation":"

The input variables to the app to be used by the InvokeScreenAutomation action request.

", + "sensitive":true + }, + "VariableValueMap":{ + "type":"map", + "key":{"shape":"VariableName"}, + "value":{"shape":"VariableValue"}, + "sensitive":true + }, + "WorkbookCursor":{"type":"long"} + }, + "documentation":"

Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams—without programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals, resources, and even your team.

" +} diff --git a/services/iam/pom.xml b/services/iam/pom.xml index b52a465b3d23..6f8ab9692832 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/service-2.json b/services/iam/src/main/resources/codegen-resources/service-2.json index 8f3e7215513a..97e6e500b98d 100644 --- a/services/iam/src/main/resources/codegen-resources/service-2.json +++ b/services/iam/src/main/resources/codegen-resources/service-2.json @@ -42,7 +42,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role, and this limit cannot be increased. You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of AWS because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this API must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

" + "documentation":"

Adds the specified IAM role to the specified instance profile. An instance profile can contain only one role. (The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.) You can remove the existing role and then add a different role to an instance profile. You must then wait for the change to appear across all of AWS because of eventual consistency. To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it.

The caller of this API must be granted the PassRole permission on the IAM role by a permissions policy.

For more information about roles, go to Working with Roles. For more information about instance profiles, go to About Instance Profiles.

" }, "AddUserToGroup":{ "name":"AddUserToGroup", @@ -140,7 +140,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials. This is true even if the AWS account has no associated users.

For information about limits on the number of keys you can create, see Limitations on IAM Entities in the IAM User Guide.

To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys.

" + "documentation":"

Creates a new AWS secret access key and corresponding AWS access key ID for the specified user. The default status for new keys is Active.

If you do not specify a user name, IAM determines the user name implicitly based on the AWS access key ID signing the request. This operation works for access keys under the AWS account. Consequently, you can use this operation to manage AWS account root user credentials. This is true even if the AWS account has no associated users.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

To ensure the security of your AWS account, the secret access key is accessible only during key and user creation. You must save the key (for example, in a text file) if you want to be able to access it again. If a secret key is lost, you can delete the access keys for the associated user and then create new keys.

" }, "CreateAccountAlias":{ "name":"CreateAccountAlias", @@ -173,7 +173,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new group.

For information about the number of groups you can create, see Limitations on IAM Entities in the IAM User Guide.

" + "documentation":"

Creates a new group.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" }, "CreateInstanceProfile":{ "name":"CreateInstanceProfile", @@ -191,7 +191,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new instance profile. For information about instance profiles, go to About Instance Profiles.

For information about the number of instance profiles you can create, see Limitations on IAM Entities in the IAM User Guide.

" + "documentation":"

Creates a new instance profile. For information about instance profiles, go to About Instance Profiles.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" }, "CreateLoginProfile":{ "name":"CreateLoginProfile", @@ -291,7 +291,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new role for your AWS account. For more information about roles, go to IAM Roles. For information about limitations on role names and the number of roles you can create, go to Limitations on IAM Entities in the IAM User Guide.

" + "documentation":"

Creates a new role for your AWS account. For more information about roles, go to IAM Roles. The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" }, "CreateSAMLProvider":{ "name":"CreateSAMLProvider", @@ -368,7 +368,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new IAM user for your AWS account.

For information about limitations on the number of IAM users you can create, see Limitations on IAM Entities in the IAM User Guide.

" + "documentation":"

Creates a new IAM user for your AWS account.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" }, "CreateVirtualMFADevice":{ "name":"CreateVirtualMFADevice", @@ -386,7 +386,7 @@ {"shape":"EntityAlreadyExistsException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

For information about limits on the number of MFA devices you can create, see Limitations on Entities in the IAM User Guide.

The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.

" + "documentation":"

Creates a new virtual MFA device for the AWS account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, go to Using a Virtual MFA Device in the IAM User Guide.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your AWS access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.

" }, "DeactivateMFADevice":{ "name":"DeactivateMFADevice", @@ -849,7 +849,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service last accessed data, see Reducing Policy Scope by Viewing User Activity in the IAM User Guide.

" + "documentation":"

Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access AWS services. Recent activity usually appears within four hours. IAM reports activity for the last 365 days, or less if your Region began supporting this feature within the last year. For more information, see Regions Where Data Is Tracked.

The service last accessed data includes all attempts to access an AWS API, not just the successful ones. This includes all attempts that were made using the AWS Management Console, the AWS API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM Events with CloudTrail in the IAM User Guide.

The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

  • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every AWS service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

    The JobId returned by GenerateServiceLastAccessedDetails must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetails.

  • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific AWS service.

To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" }, "GetAccessKeyLastUsed":{ "name":"GetAccessKeyLastUsed", @@ -909,7 +909,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

Retrieves information about IAM entity usage and IAM quotas in the AWS account.

For information about limitations on IAM entities, see Limitations on IAM Entities in the IAM User Guide.

" + "documentation":"

Retrieves information about IAM entity usage and IAM quotas in the AWS account.

The number and size of IAM resources in an AWS account are limited. For more information, see IAM and STS Quotas in the IAM User Guide.

" }, "GetContextKeysForCustomPolicy":{ "name":"GetContextKeysForCustomPolicy", @@ -1201,7 +1201,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

" + "documentation":"

Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails operation. You can use the JobId parameter in GetServiceLastAccessedDetails to retrieve the status of your report job. When the report is complete, you can retrieve the generated report. The report includes a list of AWS services that the resource (user, group, role, or managed policy) can access.

Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, AWS Organizations policies, IAM permissions boundaries, and AWS STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating Policies in the IAM User Guide.

For each service that the resource could access using permissions policies, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails operation returns the reason that it failed.

The GetServiceLastAccessedDetails operation returns a list of services. This list includes the number of entities that have attempted to access the service and the date and time of the last attempt. It also returns the ARN of the following entity, depending on the resource ARN that you used to generate the report:

  • User – Returns the user ARN that you used to generate the report

  • Group – Returns the ARN of the group member (user) that last attempted to access the service

  • Role – Returns the role ARN that you used to generate the report

  • Policy – Returns the ARN of the user or role that last used the policy to attempt to access the service

By default, the list is sorted by service namespace.

If you specified ACTION_LEVEL granularity when you generated the report, this operation returns service and action last accessed data. This includes the most recent access attempt for each tracked action within a service. Otherwise, this operation returns only service data.

For more information about service and action last accessed data, see Reducing Permissions Using Service Last Accessed Data in the IAM User Guide.

" }, "GetServiceLastAccessedDetailsWithEntities":{ "name":"GetServiceLastAccessedDetailsWithEntities", @@ -2291,6 +2291,13 @@ } }, "shapes":{ + "AccessAdvisorUsageGranularityType":{ + "type":"string", + "enum":[ + "SERVICE_LEVEL", + "ACTION_LEVEL" + ] + }, "AccessDetail":{ "type":"structure", "required":[ @@ -2598,7 +2605,7 @@ "documentation":"

The data type of the value (or values) specified in the ContextKeyValues parameter.

" } }, - "documentation":"

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy .

" + "documentation":"

Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy.

" }, "ContextEntryListType":{ "type":"list", @@ -3709,6 +3716,10 @@ "Arn":{ "shape":"arnType", "documentation":"

The ARN of the IAM resource (user, group, role, or managed policy) used to generate information about when the resource was last used in an attempt to access an AWS service.

" + }, + "Granularity":{ + "shape":"AccessAdvisorUsageGranularityType", + "documentation":"

The level of detail that you want to generate. You can specify whether you want to generate information about the last attempt to access services or actions. If you specify service-level granularity, this operation generates only service data. If you specify action-level granularity, it generates service and action data. If you don't include this optional parameter, the operation generates service data.

" } } }, @@ -3717,7 +3728,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

The job ID that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities operations.

" + "documentation":"

The JobId that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities operations. The JobId returned by GenerateServiceLastAccessedDetails must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetails.

" } } }, @@ -4282,7 +4293,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

The ID of the request generated by the GenerateServiceLastAccessedDetails operation.

" + "documentation":"

The ID of the request generated by the GenerateServiceLastAccessedDetails operation. The JobId returned by GenerateServiceLastAccessedDetails must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetails.

" }, "MaxItems":{ "shape":"maxItemsType", @@ -4307,6 +4318,10 @@ "shape":"jobStatusType", "documentation":"

The status of the job.

" }, + "JobType":{ + "shape":"AccessAdvisorUsageGranularityType", + "documentation":"

The type of job. Service jobs return information about when each service was last accessed. Action jobs also include information about when tracked actions within the service were last accessed.

" + }, "JobCreationDate":{ "shape":"dateType", "documentation":"

The date and time, in ISO 8601 date-time format, when the report job was created.

" @@ -4321,7 +4336,7 @@ }, "IsTruncated":{ "shape":"booleanType", - "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

" + "documentation":"

A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items. Note that IAM might return fewer than the MaxItems number of results even when there are more results available. We recommend that you check IsTruncated after every call to ensure that you receive all your results.

" }, "Marker":{ "shape":"responseMarkerType", @@ -4666,7 +4681,7 @@ "members":{ "message":{"shape":"limitExceededMessage"} }, - "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limits. The error message describes the limit exceeded.

", + "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS account limitations. The error message describes the limit exceeded.

", "error":{ "code":"LimitExceeded", "httpStatusCode":409, @@ -6783,9 +6798,17 @@ "shape":"arnType", "documentation":"

The ARN of the authenticated entity (user or role) that last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" }, + "LastAuthenticatedRegion":{ + "shape":"stringType", + "documentation":"

The Region from which the authenticated entity (user or role) last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + }, "TotalAuthenticatedEntities":{ "shape":"integerType", "documentation":"

The total number of authenticated principals (root user, IAM users, or IAM roles) that have attempted to access the service.

This field is null if no principals attempted to access the service within the reporting period.

" + }, + "TrackedActionsLastAccessed":{ + "shape":"TrackedActionsLastAccessed", + "documentation":"

An object that contains details about the most recent attempt to access a tracked action within the service.

This field is null if there are no tracked actions or if the principal did not use the tracked actions within the reporting period. This field is also null if the report was generated at the service level and not the action level. For more information, see the Granularity field in GenerateServiceLastAccessedDetails.

" } }, "documentation":"

Contains details about the most recent attempt to access the service.

This data type is used as a response element in the GetServiceLastAccessedDetails operation.

" @@ -7039,7 +7062,7 @@ }, "PermissionsBoundaryPolicyInputList":{ "shape":"SimulationPolicyListType", - "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permission boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" + "documentation":"

The IAM permissions boundary policy to simulate. The permissions boundary sets the maximum permissions that the entity can have. You can input only one permissions boundary when you pass a policy to this operation. An IAM entity can only have one permissions boundary in effect at a time. For example, if a permissions boundary is attached to an entity and you pass in a different permissions boundary policy using this parameter, then the new permissions boundary policy is used for the simulation. For more information about permissions boundaries, see Permissions Boundaries for IAM Entities in the IAM User Guide. The policy input is specified as a string containing the complete, valid JSON text of a permissions boundary policy.

The regex pattern used to validate this parameter is a string of characters consisting of the following:

  • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

  • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

  • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

" }, "ActionNames":{ "shape":"ActionNameListType", @@ -7161,6 +7184,29 @@ } } }, + "TrackedActionLastAccessed":{ + "type":"structure", + "members":{ + "ActionName":{ + "shape":"stringType", + "documentation":"

The name of the tracked action to which access was attempted. Tracked actions are actions that report activity to IAM.

" + }, + "LastAccessedEntity":{"shape":"arnType"}, + "LastAccessedTime":{ + "shape":"dateType", + "documentation":"

The date and time, in ISO 8601 date-time format, when an authenticated entity most recently attempted to access the tracked action. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + }, + "LastAccessedRegion":{ + "shape":"stringType", + "documentation":"

The Region from which the authenticated entity (user or role) last attempted to access the tracked action. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

" + } + }, + "documentation":"

Contains details about the most recent attempt to access an action within the service.

This data type is used as a response element in the GetServiceLastAccessedDetails operation.

" + }, + "TrackedActionsLastAccessed":{ + "type":"list", + "member":{"shape":"TrackedActionLastAccessed"} + }, "UnmodifiableEntityException":{ "type":"structure", "members":{ @@ -8285,5 +8331,5 @@ "pattern":"[\\w+=,.@-]+" } }, - "documentation":"AWS Identity and Access Management

AWS Identity and Access Management (IAM) is a web service that you can use to manage users and user permissions under your AWS account. This guide provides descriptions of IAM actions that you can call programmatically. For general information about IAM, see AWS Identity and Access Management (IAM). For the user guide for IAM, see Using IAM.

AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to IAM and AWS. For example, the SDKs take care of tasks such as cryptographically signing requests (see below), managing errors, and retrying requests automatically. For information about the AWS SDKs, including how to download and install them, see the Tools for Amazon Web Services page.

We recommend that you use the AWS SDKs to make programmatic API calls to IAM. However, you can also use the IAM Query API to make direct calls to the IAM web service. To learn more about the IAM Query API, see Making Query Requests in the Using IAM guide. IAM supports GET and POST requests for all actions. That is, the API does not require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Signing Requests

Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your AWS account access key ID and secret access key for everyday work with IAM. You can use the access key ID and secret access key for an IAM user or you can use the AWS Security Token Service to generate temporary security credentials and use those to sign requests.

To sign requests, we recommend that you use Signature Version 4. If you have an existing application that uses Signature Version 2, you do not have to update it to use Signature Version 4. However, some operations now require Signature Version 4. The documentation for operations that require version 4 indicate this requirement.

Additional Resources

For more information, see the following:

  • AWS Security Credentials. This topic provides general information about the types of credentials used for accessing AWS.

  • IAM Best Practices. This topic presents a list of suggestions for using the IAM service to help secure your AWS resources.

  • Signing AWS API Requests. This set of topics walk you through the process of signing a request using an access key ID and secret access key.

" + "documentation":"AWS Identity and Access Management

AWS Identity and Access Management (IAM) is a web service for securely controlling access to AWS services. With IAM, you can centrally manage users, security credentials such as access keys, and permissions that control which AWS resources users and applications can access. For more information about IAM, see AWS Identity and Access Management (IAM) and the AWS Identity and Access Management User Guide.

" } diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 0789f040de39..5e7f1ef43d57 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json index ea6a897302f2..1a8848de3cc5 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/imagebuilder/src/main/resources/codegen-resources/service-2.json @@ -51,7 +51,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"InvalidVersionNumberException"}, {"shape":"ResourceInUseException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new component that can be used to build, validate, test, and assess your image.

" }, @@ -73,7 +74,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, {"shape":"ResourceAlreadyExistsException"}, - {"shape":"InvalidParameterCombinationException"} + {"shape":"InvalidParameterCombinationException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new distribution configuration. Distribution configurations define and configure the outputs of your pipeline.

" }, @@ -93,7 +95,8 @@ {"shape":"IdempotentParameterMismatchException"}, {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image. This request will create a new image along with all of the configured output resources defined in the distribution configuration.

" }, @@ -114,7 +117,8 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image pipeline. Image pipelines enable you to automate the creation and distribution of images.

" }, @@ -136,7 +140,8 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"InvalidVersionNumberException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new image recipe. Image recipes define how images are configured, tested, and assessed.

" }, @@ -157,7 +162,8 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceInUseException"}, - {"shape":"ResourceAlreadyExistsException"} + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Creates a new infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested.

" }, @@ -533,7 +539,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns a list of distribution configurations.

" + "documentation":"

Returns a list of image build versions.

" }, "ListImagePipelineImages":{ "name":"ListImagePipelineImages", @@ -610,7 +616,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of image build versions for the specified semantic version.

" + "documentation":"

Returns the list of images that you have access to.

" }, "ListInfrastructureConfigurations":{ "name":"ListInfrastructureConfigurations", @@ -664,7 +670,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Applies a policy to a component.

" + "documentation":"

Applies a policy to a component. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutComponentPolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

" }, "PutImagePolicy":{ "name":"PutImagePolicy", @@ -684,7 +690,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Applies a policy to an image.

" + "documentation":"

Applies a policy to an image. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutImagePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

" }, "PutImageRecipePolicy":{ "name":"PutImageRecipePolicy", @@ -704,7 +710,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Applies a policy to an image recipe.

" + "documentation":"

Applies a policy to an image recipe. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutImageRecipePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.

" }, "StartImagePipelineExecution":{ "name":"StartImagePipelineExecution", @@ -829,19 +835,19 @@ "members":{ "region":{ "shape":"NonEmptyString", - "documentation":"

The AWS Region of the EC2 AMI.

" + "documentation":"

The AWS Region of the EC2 AMI.

" }, "image":{ "shape":"NonEmptyString", - "documentation":"

The AMI ID of the EC2 AMI.

" + "documentation":"

The AMI ID of the EC2 AMI.

" }, "name":{ "shape":"NonEmptyString", - "documentation":"

The name of the EC2 AMI.

" + "documentation":"

The name of the EC2 AMI.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the EC2 AMI.

" + "documentation":"

The description of the EC2 AMI.

" }, "state":{"shape":"ImageState"} }, @@ -852,15 +858,19 @@ "members":{ "name":{ "shape":"AmiNameString", - "documentation":"

The name of the distribution configuration.

" + "documentation":"

The name of the distribution configuration.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the distribution configuration.

" + "documentation":"

The description of the distribution configuration.

" }, "amiTags":{ "shape":"TagMap", - "documentation":"

The tags to apply to AMIs distributed to this Region.

" + "documentation":"

The tags to apply to AMIs distributed to this Region.

" + }, + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The KMS key identifier used to encrypt the distributed image.

" }, "launchPermission":{ "shape":"LaunchPermissionConfiguration", @@ -973,6 +983,10 @@ "shape":"Platform", "documentation":"

The platform of the component.

" }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + }, "owner":{ "shape":"NonEmptyString", "documentation":"

The owner of the component.

" @@ -1010,7 +1024,7 @@ "members":{ "componentArn":{ "shape":"ComponentVersionArnOrBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component.

" + "documentation":"

The Amazon Resource Name (ARN) of the component.

" } }, "documentation":"

Configuration details of the component.

" @@ -1044,6 +1058,10 @@ "shape":"Platform", "documentation":"

The platform of the component.

" }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + }, "type":{ "shape":"ComponentType", "documentation":"

The type of the component denotes whether the component is used to build the image or only to test it.

" @@ -1105,6 +1123,10 @@ "shape":"Platform", "documentation":"

The platform of the component.

" }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + }, "type":{ "shape":"ComponentType", "documentation":"

The type of the component denotes whether the component is used to build the image or only to test it.

" @@ -1161,6 +1183,10 @@ "shape":"Platform", "documentation":"

The platform of the component.

" }, + "supportedOsVersions":{ + "shape":"OsVersionList", + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + }, "data":{ "shape":"InlineComponentData", "documentation":"

The data of the component. Used to specify the data inline. Either data or uri can be used to specify the data within the component.

" @@ -1282,6 +1308,10 @@ "shape":"ImageTestsConfiguration", "documentation":"

The image test configuration of the image pipeline.

" }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

" + }, "schedule":{ "shape":"Schedule", "documentation":"

The schedule of the image pipeline.

" @@ -1338,27 +1368,31 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the image recipe.

" + "documentation":"

The semantic version of the image recipe.

" }, "components":{ "shape":"ComponentConfigurationList", - "documentation":"

The components of the image recipe.

" + "documentation":"

The components of the image recipe.

" }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The parent image of the image recipe.

" + "documentation":"

The parent image of the image recipe. The value of the string can be the ARN of the parent image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/2019.x.x. The ARN ends with /20xx.x.x, which communicates to EC2 Image Builder that you want to use the latest AMI created in 20xx (year). You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.

" }, "blockDeviceMappings":{ "shape":"InstanceBlockDeviceMappings", - "documentation":"

The block device mappings of the image recipe.

" + "documentation":"

The block device mappings of the image recipe.

" }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the image recipe.

" }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

The working directory to be used during build and test workflows.

" + }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

", + "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true } } @@ -1368,15 +1402,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that was created by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that was created by this request.

" } } }, @@ -1404,6 +1438,10 @@ "shape":"ImageTestsConfiguration", "documentation":"

The image tests configuration of the image.

" }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the image.

" @@ -1442,51 +1480,55 @@ "members":{ "name":{ "shape":"ResourceName", - "documentation":"

The name of the infrastructure configuration.

" + "documentation":"

The name of the infrastructure configuration.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the infrastructure configuration.

" + "documentation":"

The description of the infrastructure configuration.

" }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" + "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" }, "instanceProfileName":{ "shape":"NonEmptyString", - "documentation":"

The instance profile to associate with the instance used to customize your EC2 AMI.

" + "documentation":"

The instance profile to associate with the instance used to customize your EC2 AMI.

" }, "securityGroupIds":{ "shape":"SecurityGroupIds", - "documentation":"

The security group IDs to associate with the instance used to customize your EC2 AMI.

" + "documentation":"

The security group IDs to associate with the instance used to customize your EC2 AMI.

" }, "subnetId":{ "shape":"NonEmptyString", - "documentation":"

The subnet ID in which to place the instance used to customize your EC2 AMI.

" + "documentation":"

The subnet ID in which to place the instance used to customize your EC2 AMI.

" }, "logging":{ "shape":"Logging", - "documentation":"

The logging configuration of the infrastructure configuration.

" + "documentation":"

The logging configuration of the infrastructure configuration.

" }, "keyPair":{ "shape":"NonEmptyString", - "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" + "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" }, "terminateInstanceOnFailure":{ "shape":"NullableBoolean", - "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" + "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" }, "snsTopicArn":{ "shape":"SnsTopicArn", - "documentation":"

The SNS topic on which to send image build events.

" + "documentation":"

The SNS topic on which to send image build events.

" + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of the infrastructure configuration.

" + "documentation":"

The tags of the infrastructure configuration.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

", + "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true } } @@ -1496,15 +1538,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was created by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was created by this request.

" } } }, @@ -1515,7 +1557,7 @@ "members":{ "componentBuildVersionArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component build version to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the component build version to delete.

", "location":"querystring", "locationName":"componentBuildVersionArn" } @@ -1526,11 +1568,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "componentBuildVersionArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component build version that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the component build version that was deleted.

" } } }, @@ -1540,7 +1582,7 @@ "members":{ "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration to delete.

", "location":"querystring", "locationName":"distributionConfigurationArn" } @@ -1551,11 +1593,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that was deleted.

" } } }, @@ -1565,7 +1607,7 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline to delete.

", "location":"querystring", "locationName":"imagePipelineArn" } @@ -1576,11 +1618,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that was deleted.

" } } }, @@ -1590,7 +1632,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the image recipe to delete.

", "location":"querystring", "locationName":"imageRecipeArn" } @@ -1601,11 +1643,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that was deleted.

" } } }, @@ -1615,7 +1657,7 @@ "members":{ "imageBuildVersionArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the image to delete.

", "location":"querystring", "locationName":"imageBuildVersionArn" } @@ -1626,11 +1668,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageBuildVersionArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the image that was deleted.

" } } }, @@ -1640,7 +1682,7 @@ "members":{ "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration to delete.

", + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration to delete.

", "location":"querystring", "locationName":"infrastructureConfigurationArn" } @@ -1651,11 +1693,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was deleted.

" } } }, @@ -1665,15 +1707,15 @@ "members":{ "region":{ "shape":"NonEmptyString", - "documentation":"

The target Region.

" + "documentation":"

The target Region.

" }, "amiDistributionConfiguration":{ "shape":"AmiDistributionConfiguration", - "documentation":"

The specific AMI settings (for example, launch permissions, AMI tags).

" + "documentation":"

The specific AMI settings (for example, launch permissions, AMI tags).

" }, "licenseConfigurationArns":{ "shape":"ArnList", - "documentation":"

The License Manager Configuration to associate with the AMI in the specified Region.

" + "documentation":"

The License Manager Configuration to associate with the AMI in the specified Region.

" } }, "documentation":"

Defines the settings for a specific Region.

" @@ -1829,14 +1871,14 @@ "members":{ "name":{ "shape":"FilterName", - "documentation":"

The name of the filter. Filter names are case-sensitive.

" + "documentation":"

The name of the filter. Filter names are case-sensitive.

" }, "values":{ "shape":"FilterValues", - "documentation":"

The filter values. Filter values are case-sensitive.

" + "documentation":"

The filter values. Filter values are case-sensitive.

" } }, - "documentation":"

A filter name and value pair that is used to return a more specific list of results from a list operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a list operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

" }, "FilterList":{ "type":"list", @@ -1873,7 +1915,7 @@ "members":{ "componentArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component whose policy you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the component whose policy you want to retrieve.

", "location":"querystring", "locationName":"componentArn" } @@ -1884,11 +1926,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The component policy.

" + "documentation":"

The component policy.

" } } }, @@ -1897,8 +1939,8 @@ "required":["componentBuildVersionArn"], "members":{ "componentBuildVersionArn":{ - "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component that you want to retrieve. Regex requires \"/\\d+$\" suffix.

", + "shape":"ComponentVersionArnOrBuildVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the component that you want to retrieve. Regex requires \"/\\d+$\" suffix.

", "location":"querystring", "locationName":"componentBuildVersionArn" } @@ -1909,11 +1951,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "component":{ "shape":"Component", - "documentation":"

The component object associated with the specified ARN.

" + "documentation":"

The component object associated with the specified ARN.

" } } }, @@ -1923,7 +1965,7 @@ "members":{ "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to retrieve.

", "location":"querystring", "locationName":"distributionConfigurationArn" } @@ -1934,11 +1976,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "distributionConfiguration":{ "shape":"DistributionConfiguration", - "documentation":"

The distribution configuration object.

" + "documentation":"

The distribution configuration object.

" } } }, @@ -1948,7 +1990,7 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to retrieve.

", "location":"querystring", "locationName":"imagePipelineArn" } @@ -1959,11 +2001,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imagePipeline":{ "shape":"ImagePipeline", - "documentation":"

The image pipeline object.

" + "documentation":"

The image pipeline object.

" } } }, @@ -1973,7 +2015,7 @@ "members":{ "imageArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image whose policy you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image whose policy you want to retrieve.

", "location":"querystring", "locationName":"imageArn" } @@ -1984,11 +2026,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The image policy object.

" + "documentation":"

The image policy object.

" } } }, @@ -1998,7 +2040,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe whose policy you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image recipe whose policy you want to retrieve.

", "location":"querystring", "locationName":"imageRecipeArn" } @@ -2009,11 +2051,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The image recipe policy object.

" + "documentation":"

The image recipe policy object.

" } } }, @@ -2023,7 +2065,7 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that you want to retrieve.

", "location":"querystring", "locationName":"imageRecipeArn" } @@ -2034,11 +2076,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageRecipe":{ "shape":"ImageRecipe", - "documentation":"

The image recipe object.

" + "documentation":"

The image recipe object.

" } } }, @@ -2047,8 +2089,8 @@ "required":["imageBuildVersionArn"], "members":{ "imageBuildVersionArn":{ - "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that you want to retrieve.

", + "shape":"ImageVersionArnOrBuildVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the image that you want to retrieve.

", "location":"querystring", "locationName":"imageBuildVersionArn" } @@ -2059,11 +2101,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "image":{ "shape":"Image", - "documentation":"

The image object.

" + "documentation":"

The image object.

" } } }, @@ -2085,11 +2127,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "infrastructureConfiguration":{ "shape":"InfrastructureConfiguration", - "documentation":"

The infrastructure configuration object.

" + "documentation":"

The infrastructure configuration object.

" } }, "documentation":"

GetInfrastructureConfiguration response object.

" @@ -2122,6 +2164,14 @@ "shape":"Platform", "documentation":"

The platform of the image.

" }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

" + }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

" + }, "state":{ "shape":"ImageState", "documentation":"

The state of the image.

" @@ -2140,7 +2190,7 @@ }, "infrastructureConfiguration":{ "shape":"InfrastructureConfiguration", - "documentation":"

The infrastructure used when creating this image.

" + "documentation":"

The infrastructure used when creating this image.

" }, "distributionConfiguration":{ "shape":"DistributionConfiguration", @@ -2192,6 +2242,10 @@ "shape":"Platform", "documentation":"

The platform of the image pipeline.

" }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

" + }, "imageRecipeArn":{ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the image recipe associated with this image pipeline.

" @@ -2293,6 +2347,10 @@ "tags":{ "shape":"TagMap", "documentation":"

The tags of the image recipe.

" + }, + "workingDirectory":{ + "shape":"NonEmptyString", + "documentation":"

The working directory to be used during build and test workflows.

" } }, "documentation":"

An image recipe.

" @@ -2344,11 +2402,11 @@ "members":{ "status":{ "shape":"ImageStatus", - "documentation":"

The status of the image.

" + "documentation":"

The status of the image.

" }, "reason":{ "shape":"NonEmptyString", - "documentation":"

The reason for the image's status.

" + "documentation":"

The reason for the image's status.

" } }, "documentation":"

Image state shows the image status and the reason for that status.

" @@ -2388,6 +2446,10 @@ "shape":"Platform", "documentation":"

The platform of the image.

" }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

" + }, "state":{ "shape":"ImageState", "documentation":"

The state of the image.

" @@ -2453,6 +2515,10 @@ "shape":"Platform", "documentation":"

The platform of the image semantic version.

" }, + "osVersion":{ + "shape":"OsVersion", + "documentation":"

The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.

" + }, "owner":{ "shape":"NonEmptyString", "documentation":"

The owner of the image semantic version.

" @@ -2468,6 +2534,10 @@ "type":"string", "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/\\d+\\.\\d+\\.\\d+$" }, + "ImageVersionArnOrBuildVersionArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:imagebuilder:[^:]+:(?:\\d{12}|aws):image/[a-z0-9-_]+/(?:(?:(\\d+|x)\\.(\\d+|x)\\.(\\d+|x))|(?:\\d+\\.\\d+\\.\\d+/\\d+))$" + }, "ImageVersionList":{ "type":"list", "member":{"shape":"ImageVersion"} @@ -2497,7 +2567,7 @@ }, "changeDescription":{ "shape":"NonEmptyString", - "documentation":"

The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

" + "documentation":"

The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

" }, "type":{ "shape":"ComponentType", @@ -2505,11 +2575,11 @@ }, "format":{ "shape":"ComponentFormat", - "documentation":"

The format of the resource that you want to import as a component.

" + "documentation":"

The format of the resource that you want to import as a component.

" }, "platform":{ "shape":"Platform", - "documentation":"

The platform of the component.

" + "documentation":"

The platform of the component.

" }, "data":{ "shape":"NonEmptyString", @@ -2521,15 +2591,15 @@ }, "kmsKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ID of the KMS key that should be used to encrypt this component.

" + "documentation":"

The ID of the KMS key that should be used to encrypt this component.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of the component.

" + "documentation":"

The tags of the component.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token of the component.

", + "documentation":"

The idempotency token of the component.

", "idempotencyToken":true } } @@ -2539,15 +2609,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "componentBuildVersionArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the imported component.

" + "documentation":"

The Amazon Resource Name (ARN) of the imported component.

" } } }, @@ -2606,6 +2676,10 @@ "shape":"DateTime", "documentation":"

The date on which the infrastructure configuration was last updated.

" }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" @@ -2640,6 +2714,10 @@ "shape":"DateTime", "documentation":"

The date on which the infrastructure configuration was last updated.

" }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the image created by Image Builder.

" + }, "tags":{ "shape":"TagMap", "documentation":"

The tags of the infrastructure configuration.

" @@ -2746,14 +2824,14 @@ "members":{ "userIds":{ "shape":"AccountList", - "documentation":"

The AWS account ID.

" + "documentation":"

The AWS account ID.

" }, "userGroups":{ "shape":"StringList", "documentation":"

The name of the group.

" } }, - "documentation":"

Describes the configuration for a launch permission. The launch permission modification request is sent to the EC2 ModifyImageAttribute API on behalf of the user for each Region they have selected to distribute the AMI.

" + "documentation":"

Describes the configuration for a launch permission. The launch permission modification request is sent to the EC2 ModifyImageAttribute API on behalf of the user for each Region they have selected to distribute the AMI. To make an AMI public, set the launch permission authorized accounts to all. See the examples for making an AMI public at EC2 ModifyImageAttribute.

" }, "ListComponentBuildVersionsRequest":{ "type":"structure", @@ -2761,16 +2839,16 @@ "members":{ "componentVersionArn":{ "shape":"ComponentVersionArn", - "documentation":"

The component version Amazon Resource Name (ARN) whose versions you want to list.

" + "documentation":"

The component version Amazon Resource Name (ARN) whose versions you want to list.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2779,15 +2857,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "componentSummaryList":{ "shape":"ComponentSummaryList", - "documentation":"

The list of component summaries for the specified semantic version.

" + "documentation":"

The list of component summaries for the specified semantic version.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2796,20 +2874,20 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines which components you want to list. By default, this request will only show components owned by your account. You can use this field to specify if you want to view components owned by yourself, by Amazon, or those components that have been shared with you by other customers.

" + "documentation":"

The owner defines which components you want to list. By default, this request will only show components owned by your account. You can use this field to specify if you want to view components owned by yourself, by Amazon, or those components that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2818,15 +2896,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "componentVersionList":{ "shape":"ComponentVersionList", - "documentation":"

The list of component semantic versions.

" + "documentation":"

The list of component semantic versions.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2835,16 +2913,16 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2853,15 +2931,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "distributionConfigurationSummaryList":{ "shape":"DistributionConfigurationSummaryList", - "documentation":"

The list of distributions.

" + "documentation":"

The list of distributions.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2871,20 +2949,20 @@ "members":{ "imageVersionArn":{ "shape":"ImageVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image whose build versions you want to retrieve.

" + "documentation":"

The Amazon Resource Name (ARN) of the image whose build versions you want to retrieve.

" }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2893,15 +2971,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageSummaryList":{ "shape":"ImageSummaryList", - "documentation":"

The list of image build versions.

" + "documentation":"

The list of image build versions.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2911,20 +2989,20 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline whose images you want to view.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline whose images you want to view.

" }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2933,15 +3011,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageSummaryList":{ "shape":"ImageSummaryList", - "documentation":"

The list of images built by this pipeline.

" + "documentation":"

The list of images built by this pipeline.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2950,16 +3028,16 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -2968,15 +3046,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imagePipelineList":{ "shape":"ImagePipelineList", - "documentation":"

The list of image pipelines.

" + "documentation":"

The list of image pipelines.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -2985,20 +3063,20 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines which image recipes you want to list. By default, this request will only show image recipes owned by your account. You can use this field to specify if you want to view image recipes owned by yourself, by Amazon, or those image recipes that have been shared with you by other customers.

" + "documentation":"

The owner defines which image recipes you want to list. By default, this request will only show image recipes owned by your account. You can use this field to specify if you want to view image recipes owned by yourself, by Amazon, or those image recipes that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -3007,15 +3085,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageRecipeSummaryList":{ "shape":"ImageRecipeSummaryList", - "documentation":"

The list of image pipelines.

" + "documentation":"

The list of image pipelines.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3024,20 +3102,20 @@ "members":{ "owner":{ "shape":"Ownership", - "documentation":"

The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to view images owned by yourself, by Amazon, or those images that have been shared with you by other customers.

" + "documentation":"

The owner defines which images you want to list. By default, this request will only show images owned by your account. You can use this field to specify if you want to view images owned by yourself, by Amazon, or those images that have been shared with you by other customers.

" }, "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -3046,15 +3124,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageVersionList":{ "shape":"ImageVersionList", - "documentation":"

The list of image semantic versions.

" + "documentation":"

The list of image semantic versions.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3063,16 +3141,16 @@ "members":{ "filters":{ "shape":"FilterList", - "documentation":"

The filters.

" + "documentation":"

The filters.

" }, "maxResults":{ "shape":"RestrictedInteger", - "documentation":"

The maximum items to return in a request.

", + "documentation":"

The maximum items to return in a request.

", "box":true }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" } } }, @@ -3081,15 +3159,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "infrastructureConfigurationSummaryList":{ "shape":"InfrastructureConfigurationSummaryList", - "documentation":"

The list of infrastructure configurations.

" + "documentation":"

The list of infrastructure configurations.

" }, "nextToken":{ "shape":"NonEmptyString", - "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" + "documentation":"

The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.

" } } }, @@ -3099,7 +3177,7 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource whose tags you want to retrieve.

", "location":"uri", "locationName":"resourceArn" } @@ -3110,7 +3188,7 @@ "members":{ "tags":{ "shape":"TagMap", - "documentation":"

The tags for the specified resource.

" + "documentation":"

The tags for the specified resource.

" } } }, @@ -3130,6 +3208,16 @@ "min":1 }, "NullableBoolean":{"type":"boolean"}, + "OsVersion":{ + "type":"string", + "min":1 + }, + "OsVersionList":{ + "type":"list", + "member":{"shape":"OsVersion"}, + "max":25, + "min":1 + }, "OutputResources":{ "type":"structure", "members":{ @@ -3178,11 +3266,11 @@ "members":{ "componentArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component that this policy should be applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the component that this policy should be applied to.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The policy to apply.

" + "documentation":"

The policy to apply.

" } } }, @@ -3191,11 +3279,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "componentArn":{ "shape":"ComponentBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the component that this policy was applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the component that this policy was applied to.

" } } }, @@ -3208,11 +3296,11 @@ "members":{ "imageArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that this policy should be applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the image that this policy should be applied to.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The policy to apply.

" + "documentation":"

The policy to apply.

" } } }, @@ -3221,11 +3309,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that this policy was applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the image that this policy was applied to.

" } } }, @@ -3238,11 +3326,11 @@ "members":{ "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that this policy should be applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that this policy should be applied to.

" }, "policy":{ "shape":"ResourcePolicyDocument", - "documentation":"

The policy to apply.

" + "documentation":"

The policy to apply.

" } } }, @@ -3251,11 +3339,11 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that this policy was applied to.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that this policy was applied to.

" } } }, @@ -3304,6 +3392,13 @@ "max":30000, "min":1 }, + "ResourceTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":30, + "min":1 + }, "RestrictedInteger":{ "type":"integer", "max":25, @@ -3328,14 +3423,14 @@ "members":{ "scheduleExpression":{ "shape":"NonEmptyString", - "documentation":"

The expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition.

" + "documentation":"

The expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition.

" }, "pipelineExecutionStartCondition":{ "shape":"PipelineExecutionStartCondition", - "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, EC2 Image Builder will build a new image only when there are known changes pending. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time.

" + "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, EC2 Image Builder will build a new image only when there are known changes pending. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time.

" } }, - "documentation":"

A schedule configures how often and when a pipeline will automatically create a new image.

" + "documentation":"

A schedule configures how often and when a pipeline will automatically create a new image.

" }, "SecurityGroupIds":{ "type":"list", @@ -3350,6 +3445,15 @@ "error":{"httpStatusCode":500}, "exception":true }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You have exceeded the number of permitted resources or operations for this service. For service quotas, see EC2 Image Builder endpoints and quotas.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -3372,11 +3476,11 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to manually invoke.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to manually invoke.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

", + "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true } } @@ -3386,15 +3490,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "imageBuildVersionArn":{ "shape":"ImageBuildVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the image that was created by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the image that was created by this request.

" } } }, @@ -3430,13 +3534,13 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to tag.

", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags to apply to the resource.

" + "documentation":"

The tags to apply to the resource.

" } } }, @@ -3458,13 +3562,13 @@ "members":{ "resourceArn":{ "shape":"ImageBuilderArn", - "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to untag.

", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to untag.

", "location":"uri", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeyList", - "documentation":"

The tag keys to remove from the resource.

", + "documentation":"

The tag keys to remove from the resource.

", "location":"querystring", "locationName":"tagKeys" } @@ -3485,19 +3589,19 @@ "members":{ "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that you want to update.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the distribution configuration.

" + "documentation":"

The description of the distribution configuration.

" }, "distributions":{ "shape":"DistributionList", - "documentation":"

The distributions of the distribution configuration.

" + "documentation":"

The distributions of the distribution configuration.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token of the distribution configuration.

", + "documentation":"

The idempotency token of the distribution configuration.

", "idempotencyToken":true } } @@ -3507,15 +3611,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that was updated by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that was updated by this request.

" } } }, @@ -3530,39 +3634,43 @@ "members":{ "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that you want to update.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the image pipeline.

" + "documentation":"

The description of the image pipeline.

" }, "imageRecipeArn":{ "shape":"ImageRecipeArn", - "documentation":"

The Amazon Resource Name (ARN) of the image recipe that will be used to configure images updated by this image pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the image recipe that will be used to configure images updated by this image pipeline.

" }, "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images updated by this image pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images updated by this image pipeline.

" }, "distributionConfigurationArn":{ "shape":"DistributionConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that will be used to configure and distribute images updated by this image pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the distribution configuration that will be used to configure and distribute images updated by this image pipeline.

" }, "imageTestsConfiguration":{ "shape":"ImageTestsConfiguration", - "documentation":"

The image test configuration of the image pipeline.

" + "documentation":"

The image test configuration of the image pipeline.

" + }, + "enhancedImageMetadataEnabled":{ + "shape":"NullableBoolean", + "documentation":"

Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.

" }, "schedule":{ "shape":"Schedule", - "documentation":"

The schedule of the image pipeline.

" + "documentation":"

The schedule of the image pipeline.

" }, "status":{ "shape":"PipelineStatus", - "documentation":"

The status of the image pipeline.

" + "documentation":"

The status of the image pipeline.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

", + "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true } } @@ -3572,15 +3680,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "imagePipelineArn":{ "shape":"ImagePipelineArn", - "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that was updated by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the image pipeline that was updated by this request.

" } } }, @@ -3594,48 +3702,52 @@ "members":{ "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you want to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that you want to update.

" }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the infrastructure configuration.

" + "documentation":"

The description of the infrastructure configuration.

" }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" + "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" }, "instanceProfileName":{ "shape":"NonEmptyString", - "documentation":"

The instance profile to associate with the instance used to customize your EC2 AMI.

" + "documentation":"

The instance profile to associate with the instance used to customize your EC2 AMI.

" }, "securityGroupIds":{ "shape":"SecurityGroupIds", - "documentation":"

The security group IDs to associate with the instance used to customize your EC2 AMI.

" + "documentation":"

The security group IDs to associate with the instance used to customize your EC2 AMI.

" }, "subnetId":{ "shape":"NonEmptyString", - "documentation":"

The subnet ID to place the instance used to customize your EC2 AMI in.

" + "documentation":"

The subnet ID to place the instance used to customize your EC2 AMI in.

" }, "logging":{ "shape":"Logging", - "documentation":"

The logging configuration of the infrastructure configuration.

" + "documentation":"

The logging configuration of the infrastructure configuration.

" }, "keyPair":{ "shape":"NonEmptyString", - "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" + "documentation":"

The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create your image.

" }, "terminateInstanceOnFailure":{ "shape":"NullableBoolean", - "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" + "documentation":"

The terminate instance on failure setting of the infrastructure configuration. Set to false if you want Image Builder to retain the instance used to configure your AMI if the build or test phase of your workflow fails.

" }, "snsTopicArn":{ "shape":"SnsTopicArn", - "documentation":"

The SNS topic on which to send image build events.

" + "documentation":"

The SNS topic on which to send image build events.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

", + "documentation":"

The idempotency token used to make this request idempotent.

", "idempotencyToken":true + }, + "resourceTags":{ + "shape":"ResourceTagMap", + "documentation":"

The tags attached to the resource created by Image Builder.

" } } }, @@ -3644,15 +3756,15 @@ "members":{ "requestId":{ "shape":"NonEmptyString", - "documentation":"

The request ID that uniquely identifies this request.

" + "documentation":"

The request ID that uniquely identifies this request.

" }, "clientToken":{ "shape":"ClientToken", - "documentation":"

The idempotency token used to make this request idempotent.

" + "documentation":"

The idempotency token used to make this request idempotent.

" }, "infrastructureConfigurationArn":{ "shape":"InfrastructureConfigurationArn", - "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was updated by this request.

" + "documentation":"

The Amazon Resource Name (ARN) of the infrastructure configuration that was updated by this request.

" } } }, diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 553015954db2..186702d22bef 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 88b2ee9604b1..3c80d8307fe5 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/service-2.json b/services/iot/src/main/resources/codegen-resources/service-2.json index c6676abb1da1..d76156a01498 100644 --- a/services/iot/src/main/resources/codegen-resources/service-2.json +++ b/services/iot/src/main/resources/codegen-resources/service-2.json @@ -1361,6 +1361,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} @@ -2813,6 +2814,26 @@ ], "documentation":"

Registers a device certificate with AWS IoT. If you have more than one CA certificate that has the same subject field, you must specify the CA certificate that was used to sign the device certificate being registered.

" }, + "RegisterCertificateWithoutCA":{ + "name":"RegisterCertificateWithoutCA", + "http":{ + "method":"POST", + "requestUri":"/certificate/register-no-ca" + }, + "input":{"shape":"RegisterCertificateWithoutCARequest"}, + "output":{"shape":"RegisterCertificateWithoutCAResponse"}, + "errors":[ + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CertificateStateException"}, + {"shape":"CertificateValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Register a certificate that does not have a certificate authority (CA).

" + }, "RegisterThing":{ "name":"RegisterThing", "http":{ @@ -2880,7 +2901,7 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Remove the specified thing from the specified group.

" + "documentation":"

Remove the specified thing from the specified group.

You must specify either a thingGroupArn or a thingGroupName to identify the thing group and either a thingArn or a thingName to identify the thing to remove from the thing group.

" }, "ReplaceTopicRule":{ "name":"ReplaceTopicRule", @@ -3539,10 +3560,10 @@ "members":{ "criteriaList":{ "shape":"AbortCriteriaList", - "documentation":"

The list of abort criteria to define rules to abort the job.

" + "documentation":"

The list of criteria that determine when and how to abort the job.

" } }, - "documentation":"

Details of abort criteria to abort the job.

" + "documentation":"

The criteria that determine when and how a job abort takes place.

" }, "AbortCriteria":{ "type":"structure", @@ -3555,22 +3576,22 @@ "members":{ "failureType":{ "shape":"JobExecutionFailureType", - "documentation":"

The type of job execution failure to define a rule to initiate a job abort.

" + "documentation":"

The type of job execution failures that can initiate a job abort.

" }, "action":{ "shape":"AbortAction", - "documentation":"

The type of abort action to initiate a job abort.

" + "documentation":"

The type of job action to take to initiate the job abort.

" }, "thresholdPercentage":{ "shape":"AbortThresholdPercentage", - "documentation":"

The threshold as a percentage of the total number of executed things that will initiate a job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" + "documentation":"

The minimum percentage of job execution failures that must occur to initiate the job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" }, "minNumberOfExecutedThings":{ "shape":"MinimumNumberOfExecutedThings", - "documentation":"

Minimum number of executed things before evaluating an abort rule.

" + "documentation":"

The minimum number of things which must receive job execution notifications before the job can be aborted.

" } }, - "documentation":"

Details of abort criteria to define rules to abort the job.

" + "documentation":"

The criteria that determine when and how a job abort takes place.

" }, "AbortCriteriaList":{ "type":"list", @@ -4011,7 +4032,7 @@ }, "target":{ "shape":"PolicyTarget", - "documentation":"

The identity to which the policy is attached.

" + "documentation":"

The identity to which the policy is attached.

" } } }, @@ -4467,6 +4488,7 @@ }, "AuthInfo":{ "type":"structure", + "required":["resources"], "members":{ "actionType":{ "shape":"ActionType", @@ -4623,16 +4645,108 @@ "AwsIotJobArn":{"type":"string"}, "AwsIotJobId":{"type":"string"}, "AwsIotSqlVersion":{"type":"string"}, + "AwsJobAbortConfig":{ + "type":"structure", + "required":["abortCriteriaList"], + "members":{ + "abortCriteriaList":{ + "shape":"AwsJobAbortCriteriaList", + "documentation":"

The list of criteria that determine when and how to abort the job.

" + } + }, + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "AwsJobAbortCriteria":{ + "type":"structure", + "required":[ + "failureType", + "action", + "thresholdPercentage", + "minNumberOfExecutedThings" + ], + "members":{ + "failureType":{ + "shape":"AwsJobAbortCriteriaFailureType", + "documentation":"

The type of job execution failures that can initiate a job abort.

" + }, + "action":{ + "shape":"AwsJobAbortCriteriaAbortAction", + "documentation":"

The type of job action to take to initiate the job abort.

" + }, + "thresholdPercentage":{ + "shape":"AwsJobAbortCriteriaAbortThresholdPercentage", + "documentation":"

The minimum percentage of job execution failures that must occur to initiate the job abort.

AWS IoT supports up to two digits after the decimal (for example, 10.9 and 10.99, but not 10.999).

" + }, + "minNumberOfExecutedThings":{ + "shape":"AwsJobAbortCriteriaMinimumNumberOfExecutedThings", + "documentation":"

The minimum number of things which must receive job execution notifications before the job can be aborted.

" + } + }, + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "AwsJobAbortCriteriaAbortAction":{ + "type":"string", + "enum":["CANCEL"] + }, + "AwsJobAbortCriteriaAbortThresholdPercentage":{ + "type":"double", + "max":100 + }, + "AwsJobAbortCriteriaFailureType":{ + "type":"string", + "enum":[ + "FAILED", + "REJECTED", + "TIMED_OUT", + "ALL" + ] + }, + "AwsJobAbortCriteriaList":{ + "type":"list", + "member":{"shape":"AwsJobAbortCriteria"}, + "min":1 + }, + "AwsJobAbortCriteriaMinimumNumberOfExecutedThings":{ + "type":"integer", + "min":1 + }, "AwsJobExecutionsRolloutConfig":{ "type":"structure", "members":{ "maximumPerMinute":{ "shape":"MaximumPerMinute", "documentation":"

The maximum number of OTA update job executions started per minute.

" + }, + "exponentialRate":{ + "shape":"AwsJobExponentialRolloutRate", + "documentation":"

The rate of increase for a job rollout. This parameter allows you to define an exponential rate increase for a job rollout.

" } }, "documentation":"

Configuration for the rollout of OTA updates.

" }, + "AwsJobExponentialRolloutRate":{ + "type":"structure", + "required":[ + "baseRatePerMinute", + "incrementFactor", + "rateIncreaseCriteria" + ], + "members":{ + "baseRatePerMinute":{ + "shape":"AwsJobRolloutRatePerMinute", + "documentation":"

The minimum number of things that will be notified of a pending job, per minute, at the start of the job rollout. This is the initial rate of the rollout.

" + }, + "incrementFactor":{ + "shape":"AwsJobRolloutIncrementFactor", + "documentation":"

The rate of increase for a job rollout. The number of things notified is multiplied by this factor.

" + }, + "rateIncreaseCriteria":{ + "shape":"AwsJobRateIncreaseCriteria", + "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

AWS IoT supports up to one digit after the decimal (for example, 1.5, but not 1.55).

" + } + }, + "documentation":"

The rate of increase for a job rollout. This parameter allows you to define an exponential rate increase for a job rollout.

" + }, "AwsJobPresignedUrlConfig":{ "type":"structure", "members":{ @@ -4643,6 +4757,41 @@ }, "documentation":"

Configuration information for pre-signed URLs. Valid when protocols contains HTTP.

" }, + "AwsJobRateIncreaseCriteria":{ + "type":"structure", + "members":{ + "numberOfNotifiedThings":{ + "shape":"AwsJobRateIncreaseCriteriaNumberOfThings", + "documentation":"

When this number of things have been notified, it will initiate an increase in the rollout rate.

" + }, + "numberOfSucceededThings":{ + "shape":"AwsJobRateIncreaseCriteriaNumberOfThings", + "documentation":"

When this number of things have succeeded in their job execution, it will initiate an increase in the rollout rate.

" + } + }, + "documentation":"

The criteria to initiate the increase in rate of rollout for a job.

" + }, + "AwsJobRateIncreaseCriteriaNumberOfThings":{ + "type":"integer", + "min":1 + }, + "AwsJobRolloutIncrementFactor":{"type":"double"}, + "AwsJobRolloutRatePerMinute":{ + "type":"integer", + "max":1000, + "min":1 + }, + "AwsJobTimeoutConfig":{ + "type":"structure", + "members":{ + "inProgressTimeoutInMinutes":{ + "shape":"AwsJobTimeoutInProgressTimeoutInMinutes", + "documentation":"

Specifies the amount of time, in minutes, this device has to finish execution of this job. The timeout interval can be anywhere between 1 minute and 7 days (1 to 10080 minutes). The in progress timer can't be updated and will apply to all job executions for the job. Whenever a job execution remains in the IN_PROGRESS status for longer than this interval, the job execution will fail and switch to the terminal TIMED_OUT status.

" + } + }, + "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" + }, + "AwsJobTimeoutInProgressTimeoutInMinutes":{"type":"long"}, "Behavior":{ "type":"structure", "required":["name"], @@ -4996,6 +5145,10 @@ "shape":"CertificateStatus", "documentation":"

The status of the certificate.

The status value REGISTER_INACTIVE is deprecated and should not be used.

" }, + "certificateMode":{ + "shape":"CertificateMode", + "documentation":"

The mode of the certificate.

" + }, "creationDate":{ "shape":"DateType", "documentation":"

The date and time the certificate was created.

" @@ -5070,6 +5223,10 @@ "validity":{ "shape":"CertificateValidity", "documentation":"

When the certificate is valid.

" + }, + "certificateMode":{ + "shape":"CertificateMode", + "documentation":"

The mode of the certificate.

" } }, "documentation":"

Describes a certificate.

" @@ -5080,6 +5237,13 @@ "min":64, "pattern":"(0x)?[a-fA-F0-9]+" }, + "CertificateMode":{ + "type":"string", + "enum":[ + "DEFAULT", + "SNI_ONLY" + ] + }, "CertificateName":{"type":"string"}, "CertificatePathOnDevice":{"type":"string"}, "CertificatePem":{ @@ -5405,6 +5569,10 @@ "shape":"AuthorizerStatus", "documentation":"

The status of the create authorizer request.

" }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the custom authorizer.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" + }, "signingDisabled":{ "shape":"BooleanKey", "documentation":"

Specifies whether AWS IoT validates the token signature in an authorization request.

" @@ -5571,7 +5739,11 @@ }, "serviceType":{ "shape":"ServiceType", - "documentation":"

The type of service delivered by the endpoint.

" + "documentation":"

The type of service delivered by the endpoint.

AWS IoT Core currently supports only the DATA service type.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the domain configuration.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" } } }, @@ -5820,7 +5992,7 @@ }, "targets":{ "shape":"Targets", - "documentation":"

The targeted devices to receive OTA updates.

" + "documentation":"

The devices targeted to receive OTA updates.

" }, "protocols":{ "shape":"Protocols", @@ -5838,13 +6010,21 @@ "shape":"AwsJobPresignedUrlConfig", "documentation":"

Configuration information for pre-signed URLs.

" }, + "awsJobAbortConfig":{ + "shape":"AwsJobAbortConfig", + "documentation":"

The criteria that determine when and how a job abort takes place.

" + }, + "awsJobTimeoutConfig":{ + "shape":"AwsJobTimeoutConfig", + "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" + }, "files":{ "shape":"OTAUpdateFiles", "documentation":"

The files to be streamed by the OTA update.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The IAM role that allows access to the AWS IoT Jobs service.

" + "documentation":"

The IAM role that grants AWS IoT access to the Amazon S3, AWS IoT jobs and AWS Code Signing resources to create an OTA update job.

" }, "additionalParameters":{ "shape":"AdditionalParameterMap", @@ -5897,6 +6077,10 @@ "policyDocument":{ "shape":"PolicyDocument", "documentation":"

The JSON document that describes the policy. policyDocument must have a minimum length of 1, with a maximum length of 2048, excluding whitespace.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the policy.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" } }, "documentation":"

The input for the CreatePolicy operation.

" @@ -6032,6 +6216,10 @@ "shape":"RoleArn", "documentation":"

The role ARN for the role associated with the fleet provisioning template. This IoT role grants permission to provision a device.

" }, + "preProvisioningHook":{ + "shape":"ProvisioningHook", + "documentation":"

Creates a pre-provisioning hook template.

" + }, "tags":{ "shape":"TagList", "documentation":"

Metadata which can be used to manage the fleet provisioning template.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" @@ -6121,6 +6309,10 @@ "credentialDurationSeconds":{ "shape":"CredentialDurationSeconds", "documentation":"

How long (in seconds) the credentials will be valid.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the role alias.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" } } }, @@ -6333,7 +6525,7 @@ "members":{ "thingName":{ "shape":"ThingName", - "documentation":"

The name of the thing to create.

", + "documentation":"

The name of the thing to create.

You can't change a thing's name after you create it. To change a thing's name, you must create a new thing, give it the new name, and then delete the old thing.

", "location":"uri", "locationName":"thingName" }, @@ -6746,7 +6938,7 @@ "members":{ "otaUpdateId":{ "shape":"OTAUpdateId", - "documentation":"

The OTA update ID to delete.

", + "documentation":"

The ID of the OTA update to delete.

", "location":"uri", "locationName":"otaUpdateId" }, @@ -6758,7 +6950,7 @@ }, "forceDeleteAWSJob":{ "shape":"ForceDeleteAWSJob", - "documentation":"

Specifies if the AWS Job associated with the OTA update should be deleted with the OTA update is deleted.

", + "documentation":"

Specifies if the AWS Job associated with the OTA update should be deleted when the OTA update is deleted.

", "location":"querystring", "locationName":"forceDeleteAWSJob" } @@ -7667,6 +7859,10 @@ "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the role associated with the provisioning template. This IoT role grants permission to provision a device.

" + }, + "preProvisioningHook":{ + "shape":"ProvisioningHook", + "documentation":"

Gets information about a pre-provisioned hook.

" } } }, @@ -7994,7 +8190,7 @@ "members":{ "defaultClientId":{ "shape":"ClientId", - "documentation":"

The default client ID.

" + "documentation":"

The default MQTT client ID. For a typical device, the thing name is also used as the default MQTT client ID. Although we don’t require a mapping between a thing's registry name and its use of MQTT client IDs, certificates, or shadow state, we recommend that you choose a thing name and use it as the MQTT client ID for the registry and the Device Shadow service.

This lets you better organize your AWS IoT fleet without removing the flexibility of the underlying device certificate model or shadows.

" }, "thingName":{ "shape":"ThingName", @@ -11839,6 +12035,7 @@ }, "MaxJobExecutionsPerMin":{ "type":"integer", + "max":1000, "min":1 }, "MaxResults":{ @@ -11877,7 +12074,7 @@ }, "operator":{ "shape":"DimensionValueOperator", - "documentation":"

Defines how the dimensionValues of a dimension are interpreted. For example, for DimensionType TOPIC_FILTER, with IN operator, a message will be counted only if its topic matches one of the topic filters. With NOT_IN Operator, a message will be counted only if it doesn't match any of the topic filters. The operator is optional: if it's not provided (is null), it will be interpreted as IN.

" + "documentation":"

Defines how the dimensionValues of a dimension are interpreted. For example, for dimension type TOPIC_FILTER, with the IN operator, a message will be counted only if its topic matches one of the topic filters. With the NOT_IN operator, a message will be counted only if it doesn't match any of the topic filters. The operator is optional: if it's not provided (is null), it will be interpreted as IN.

" } }, "documentation":"

The dimension of a metric.

" @@ -12293,6 +12490,12 @@ }, "PartitionKey":{"type":"string"}, "PayloadField":{"type":"string"}, + "PayloadVersion":{ + "type":"string", + "max":32, + "min":10, + "pattern":"^[0-9-]+$" + }, "Percent":{ "type":"double", "max":100, @@ -12469,6 +12672,21 @@ "max":2, "min":1 }, + "ProvisioningHook":{ + "type":"structure", + "required":["targetArn"], + "members":{ + "payloadVersion":{ + "shape":"PayloadVersion", + "documentation":"

The payload that was sent to the target function.

Note: Only Lambda functions are currently supported.

" + }, + "targetArn":{ + "shape":"TargetArn", + "documentation":"

The ARN of the target function.

Note: Only Lambda functions are currently supported.

" + } + }, + "documentation":"

Structure that contains payloadVersion and targetArn.

" + }, "ProvisioningTemplateListing":{ "type":"list", "member":{"shape":"ProvisioningTemplateSummary"} @@ -12666,6 +12884,10 @@ "registrationConfig":{ "shape":"RegistrationConfig", "documentation":"

Information about the registration configuration.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Metadata which can be used to manage the CA certificate.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: &&tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

" } }, "documentation":"

The input to the RegisterCACertificate operation.

" @@ -12724,6 +12946,33 @@ }, "documentation":"

The output from the RegisterCertificate operation.

" }, + "RegisterCertificateWithoutCARequest":{ + "type":"structure", + "required":["certificatePem"], + "members":{ + "certificatePem":{ + "shape":"CertificatePem", + "documentation":"

The certificate data, in PEM format.

" + }, + "status":{ + "shape":"CertificateStatus", + "documentation":"

The status of the register certificate request.

" + } + } + }, + "RegisterCertificateWithoutCAResponse":{ + "type":"structure", + "members":{ + "certificateArn":{ + "shape":"CertificateArn", + "documentation":"

The Amazon Resource Name (ARN) of the registered certificate.

" + }, + "certificateId":{ + "shape":"CertificateId", + "documentation":"

The ID of the registered certificate. (The last part of the certificate ARN contains the certificate ID.)

" + } + } + }, "RegisterThingRequest":{ "type":"structure", "required":["templateBody"], @@ -12734,7 +12983,7 @@ }, "parameters":{ "shape":"Parameters", - "documentation":"

The parameters for provisioning a thing. See Programmatic Provisioning for more information.

" + "documentation":"

The parameters for provisioning a thing. See Provisioning Templates for more information.

" } } }, @@ -12743,7 +12992,7 @@ "members":{ "certificatePem":{ "shape":"CertificatePem", - "documentation":"

.

" + "documentation":"

The certificate data, in PEM format.

" }, "resourceArns":{ "shape":"ResourceArns", @@ -12842,6 +13091,7 @@ }, "RemoveAuthorizerConfig":{"type":"boolean"}, "RemoveAutoRegistration":{"type":"boolean"}, + "RemoveHook":{"type":"boolean"}, "RemoveThingFromBillingGroupRequest":{ "type":"structure", "members":{ @@ -14011,6 +14261,7 @@ "TableName":{"type":"string"}, "Tag":{ "type":"structure", + "required":["Key"], "members":{ "Key":{ "shape":"TagKey", @@ -14023,7 +14274,12 @@ }, "documentation":"

A set of key/value pairs that are used to manage the resource.

" }, - "TagKey":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, "TagKeyList":{ "type":"list", "member":{"shape":"TagKey"} @@ -14054,9 +14310,16 @@ "members":{ } }, - "TagValue":{"type":"string"}, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, "Target":{"type":"string"}, - "TargetArn":{"type":"string"}, + "TargetArn":{ + "type":"string", + "max":2048 + }, "TargetAuditCheckNames":{ "type":"list", "member":{"shape":"AuditCheckName"} @@ -14222,7 +14485,7 @@ }, "tokenSignature":{ "shape":"TokenSignature", - "documentation":"

The signature made with the token and your custom authentication service's private key.

" + "documentation":"

The signature made with the token and your custom authentication service's private key. This value must be Base-64-encoded.

" }, "httpContext":{ "shape":"HttpContext", @@ -15373,6 +15636,14 @@ "provisioningRoleArn":{ "shape":"RoleArn", "documentation":"

The ARN of the role associated with the provisioning template. This IoT role grants permission to provision a device.

" + }, + "preProvisioningHook":{ + "shape":"ProvisioningHook", + "documentation":"

Updates the pre-provisioning hook template.

" + }, + "removePreProvisioningHook":{ + "shape":"RemoveHook", + "documentation":"

Removes the pre-provisioning hook template.

" } } }, @@ -15659,7 +15930,7 @@ "members":{ "thingName":{ "shape":"ThingName", - "documentation":"

The name of the thing to update.

", + "documentation":"

The name of the thing to update.

You can't change a thing's name. To change a thing's name, you must create a new thing, give it the new name, and then delete the old thing.

", "location":"uri", "locationName":"thingName" }, diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index ad02facc14a1..39e1d2f0084b 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index cb7ac9d97047..d9666fafeb0b 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index ebc5a7c2d63c..b9818d1517a9 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 6427c94f8cfe..a098d155d884 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdataplane/src/main/resources/codegen-resources/paginators-1.json b/services/iotdataplane/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..5677bd8e4a2d --- /dev/null +++ b/services/iotdataplane/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,4 @@ +{ + "pagination": { + } +} diff --git a/services/iotdataplane/src/main/resources/codegen-resources/service-2.json b/services/iotdataplane/src/main/resources/codegen-resources/service-2.json index 88fba6cd551c..394d79bbc197 100644 --- 
a/services/iotdataplane/src/main/resources/codegen-resources/service-2.json +++ b/services/iotdataplane/src/main/resources/codegen-resources/service-2.json @@ -1,14 +1,14 @@ { "version":"2.0", "metadata":{ - "uid":"iot-data-2015-05-28", "apiVersion":"2015-05-28", "endpointPrefix":"data.iot", "protocol":"rest-json", "serviceFullName":"AWS IoT Data Plane", "serviceId":"IoT Data Plane", "signatureVersion":"v4", - "signingName":"iotdata" + "signingName":"iotdata", + "uid":"iot-data-2015-05-28" }, "operations":{ "DeleteThingShadow":{ @@ -29,7 +29,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Deletes the thing shadow for the specified thing.

For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Deletes the shadow for the specified thing.

For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

" }, "GetThingShadow":{ "name":"GetThingShadow", @@ -49,7 +49,26 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Gets the thing shadow for the specified thing.

For more information, see GetThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Gets the shadow for the specified thing.

For more information, see GetThingShadow in the AWS IoT Developer Guide.

" + }, + "ListNamedShadowsForThing":{ + "name":"ListNamedShadowsForThing", + "http":{ + "method":"GET", + "requestUri":"/api/things/shadow/ListNamedShadowsForThing/{thingName}" + }, + "input":{"shape":"ListNamedShadowsForThingRequest"}, + "output":{"shape":"ListNamedShadowsForThingResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"} + ], + "documentation":"

Lists the shadows for the specified thing.

" }, "Publish":{ "name":"Publish", @@ -64,7 +83,7 @@ {"shape":"UnauthorizedException"}, {"shape":"MethodNotAllowedException"} ], - "documentation":"

Publishes state information.

For more information, see HTTP Protocol in the AWS IoT Developer Guide.

" + "documentation":"

Publishes state information.

For more information, see HTTP Protocol in the AWS IoT Developer Guide.

" }, "UpdateThingShadow":{ "name":"UpdateThingShadow", @@ -85,7 +104,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Updates the thing shadow for the specified thing.

For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Updates the shadow for the specified thing.

For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

" } }, "shapes":{ @@ -93,7 +112,7 @@ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -110,6 +129,12 @@ "documentation":"

The name of the thing.

", "location":"uri", "locationName":"thingName" + }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" } }, "documentation":"

The input for the DeleteThingShadow operation.

" @@ -126,7 +151,6 @@ "documentation":"

The output from the DeleteThingShadow operation.

", "payload":"payload" }, - "ErrorMessage":{"type":"string"}, "GetThingShadowRequest":{ "type":"structure", "required":["thingName"], @@ -136,6 +160,12 @@ "documentation":"

The name of the thing.

", "location":"uri", "locationName":"thingName" + }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" } }, "documentation":"

The input for the GetThingShadow operation.

" @@ -177,11 +207,52 @@ "exception":true }, "JsonDocument":{"type":"blob"}, + "ListNamedShadowsForThingRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ThingName", + "documentation":"

The name of the thing.

", + "location":"uri", + "locationName":"thingName" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "pageSize":{ + "shape":"PageSize", + "documentation":"

The result page size.

", + "location":"querystring", + "locationName":"pageSize" + } + } + }, + "ListNamedShadowsForThingResponse":{ + "type":"structure", + "members":{ + "results":{ + "shape":"NamedShadowList", + "documentation":"

The list of shadows for the specified thing.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + }, + "timestamp":{ + "shape":"Timestamp", + "documentation":"

The Epoch date and time the response was generated by AWS IoT.

" + } + } + }, "MethodNotAllowedException":{ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -189,6 +260,16 @@ "error":{"httpStatusCode":405}, "exception":true }, + "NamedShadowList":{ + "type":"list", + "member":{"shape":"ShadowName"} + }, + "NextToken":{"type":"string"}, + "PageSize":{ + "type":"integer", + "max":100, + "min":1 + }, "Payload":{"type":"blob"}, "PublishRequest":{ "type":"structure", @@ -223,7 +304,7 @@ "type":"structure", "members":{ "message":{ - "shape":"ErrorMessage", + "shape":"errorMessage", "documentation":"

The message for the exception.

" } }, @@ -256,11 +337,17 @@ "exception":true, "fault":true }, + "ShadowName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+" + }, "ThingName":{ "type":"string", "max":128, "min":1, - "pattern":"[a-zA-Z0-9_-]+" + "pattern":"[a-zA-Z0-9:_-]+" }, "ThrottlingException":{ "type":"structure", @@ -274,6 +361,7 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Timestamp":{"type":"long"}, "Topic":{"type":"string"}, "UnauthorizedException":{ "type":"structure", @@ -312,6 +400,12 @@ "location":"uri", "locationName":"thingName" }, + "shadowName":{ + "shape":"ShadowName", + "documentation":"

The name of the shadow.

", + "location":"querystring", + "locationName":"name" + }, "payload":{ "shape":"JsonDocument", "documentation":"

The state information, in JSON format.

" @@ -333,5 +427,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS IoT

AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete thing shadows. A thing shadow is a persistent representation of your things and their state in the AWS cloud.

" + "documentation":"AWS IoT

AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the AWS cloud.

Find the endpoint address for actions in the AWS IoT data plane by running this CLI command:

aws iot describe-endpoint --endpoint-type iot:Data-ATS

The service name used by AWS Signature Version 4 to sign requests is: iotdevicegateway.

" } diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index c7d6aa0759e0..d69d386c206e 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/iotevents/src/main/resources/codegen-resources/service-2.json b/services/iotevents/src/main/resources/codegen-resources/service-2.json index 58e834925d5f..b678ef870726 100644 --- a/services/iotevents/src/main/resources/codegen-resources/service-2.json +++ b/services/iotevents/src/main/resources/codegen-resources/service-2.json @@ -326,7 +326,7 @@ }, "iotEvents":{ "shape":"IotEventsAction", - "documentation":"

Sends an AWS IoT Events input, passing in information about the detector model instance and the event that triggered the action.

" + "documentation":"

Sends AWS IoT Events input, which passes information about the detector model instance and the event that triggered the action.

" }, "sqs":{ "shape":"SqsAction", @@ -335,6 +335,18 @@ "firehose":{ "shape":"FirehoseAction", "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon Kinesis Data Firehose delivery stream.

" + }, + "dynamoDB":{ + "shape":"DynamoDBAction", + "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" + }, + "dynamoDBv2":{ + "shape":"DynamoDBv2Action", + "documentation":"

Writes to the DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify. For more information, see Actions in AWS IoT Events Developer Guide.

" + }, + "iotSiteWise":{ + "shape":"IotSiteWiseAction", + "documentation":"

Sends information about the detector model instance and the event that triggered the action to an asset property in AWS IoT SiteWise.

" } }, "documentation":"

An action to be performed when the condition is TRUE.

" @@ -348,6 +360,73 @@ "max":2048, "min":1 }, + "AssetId":{"type":"string"}, + "AssetPropertyAlias":{"type":"string"}, + "AssetPropertyBooleanValue":{"type":"string"}, + "AssetPropertyDoubleValue":{"type":"string"}, + "AssetPropertyEntryId":{"type":"string"}, + "AssetPropertyId":{"type":"string"}, + "AssetPropertyIntegerValue":{"type":"string"}, + "AssetPropertyOffsetInNanos":{"type":"string"}, + "AssetPropertyQuality":{"type":"string"}, + "AssetPropertyStringValue":{"type":"string"}, + "AssetPropertyTimeInSeconds":{"type":"string"}, + "AssetPropertyTimestamp":{ + "type":"structure", + "required":["timeInSeconds"], + "members":{ + "timeInSeconds":{ + "shape":"AssetPropertyTimeInSeconds", + "documentation":"

The timestamp, in seconds, in the Unix epoch format. The valid range is between 1-31556889864403199. You can also specify an expression.

" + }, + "offsetInNanos":{ + "shape":"AssetPropertyOffsetInNanos", + "documentation":"

The nanosecond offset converted from timeInSeconds. The valid range is between 0-999999999. You can also specify an expression.

" + } + }, + "documentation":"

A structure that contains timestamp information. For more information, see TimeInNanos in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

  • Use a string. For example, the timeInSeconds value can be '1586400675'.

  • Use an expression. For example, the timeInSeconds value can be '${$input.TemperatureInput.sensorData.timestamp/1000}'.

    For more information, see Expressions in the AWS IoT Events Developer Guide.

" + }, + "AssetPropertyValue":{ + "type":"structure", + "required":["value"], + "members":{ + "value":{ + "shape":"AssetPropertyVariant", + "documentation":"

The value to send to an asset property.

" + }, + "timestamp":{ + "shape":"AssetPropertyTimestamp", + "documentation":"

The timestamp associated with the asset property value. The default is the current event time.

" + }, + "quality":{ + "shape":"AssetPropertyQuality", + "documentation":"

The quality of the asset property value. The value must be GOOD, BAD, or UNCERTAIN. You can also specify an expression.

" + } + }, + "documentation":"

A structure that contains value information. For more information, see AssetPropertyValue in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

  • Use a string. For example, the quality value can be 'GOOD'.

  • Use an expression. For example, the quality value can be $input.TemperatureInput.sensorData.quality .

    For more information, see Expressions in the AWS IoT Events Developer Guide.

" + }, + "AssetPropertyVariant":{ + "type":"structure", + "members":{ + "stringValue":{ + "shape":"AssetPropertyStringValue", + "documentation":"

The asset property value is a string. You can also specify an expression. If you use an expression, the evaluated result should be a string.

" + }, + "integerValue":{ + "shape":"AssetPropertyIntegerValue", + "documentation":"

The asset property value is an integer. You can also specify an expression. If you use an expression, the evaluated result should be an integer.

" + }, + "doubleValue":{ + "shape":"AssetPropertyDoubleValue", + "documentation":"

The asset property value is a double. You can also specify an expression. If you use an expression, the evaluated result should be a double.

" + }, + "booleanValue":{ + "shape":"AssetPropertyBooleanValue", + "documentation":"

The asset property value is a Boolean value that must be TRUE or FALSE. You can also specify an expression. If you use an expression, the evaluated result should be a Boolean value.

" + } + }, + "documentation":"

A structure that contains an asset property value. For more information, see Variant in the AWS IoT SiteWise API Reference.

You must specify one of the following value types, depending on the dataType of the specified asset property. For more information, see AssetProperty in the AWS IoT SiteWise API Reference.

For parameters that are string data type, you can specify the following options:

  • Use a string. For example, the doubleValue value can be '47.9'.

  • Use an expression. For example, the doubleValue value can be $input.TemperatureInput.sensorData.temperature.

    For more information, see Expressions in the AWS IoT Events Developer Guide.

" + }, "Attribute":{ "type":"structure", "required":["jsonPath"], @@ -386,6 +465,10 @@ "type":"string", "max":512 }, + "ContentExpression":{ + "type":"string", + "min":1 + }, "CreateDetectorModelRequest":{ "type":"structure", "required":[ @@ -636,7 +719,7 @@ }, "key":{ "shape":"AttributeJsonPath", - "documentation":"

The input attribute key used to identify a device or system to create a detector (an instance of the detector model) and then to route each input received to the appropriate detector (instance). This parameter uses a JSON-path expression in the message payload of each input to specify the attribute-value pair that is used to identify the device associated with the input.

" + "documentation":"

The value used to identify a detector instance. When a device or system sends input, a new detector instance with a unique key value is created. AWS IoT Events can continue to route input to its corresponding detector instance based on this identifying information.

This parameter uses a JSON-path expression to select the attribute-value pair in the message payload that is used for identification. To route the message to the correct detector instance, the device must send a message payload that contains the same attribute-value.

" }, "evaluationMethod":{ "shape":"EvaluationMethod", @@ -754,6 +837,71 @@ }, "documentation":"

Information about the detector model version.

" }, + "DynamoDBAction":{ + "type":"structure", + "required":[ + "hashKeyField", + "hashKeyValue", + "tableName" + ], + "members":{ + "hashKeyType":{ + "shape":"DynamoKeyType", + "documentation":"

The data type for the hash key (also called the partition key). You can specify the following values:

  • STRING - The hash key is a string.

  • NUMBER - The hash key is a number.

If you don't specify hashKeyType, the default value is STRING.

" + }, + "hashKeyField":{ + "shape":"DynamoKeyField", + "documentation":"

The name of the hash key (also called the partition key).

" + }, + "hashKeyValue":{ + "shape":"DynamoKeyValue", + "documentation":"

The value of the hash key (also called the partition key).

" + }, + "rangeKeyType":{ + "shape":"DynamoKeyType", + "documentation":"

The data type for the range key (also called the sort key). You can specify the following values:

  • STRING - The range key is a string.

  • NUMBER - The range key is a number.

If you don't specify rangeKeyType, the default value is STRING.

" + }, + "rangeKeyField":{ + "shape":"DynamoKeyField", + "documentation":"

The name of the range key (also called the sort key).

" + }, + "rangeKeyValue":{ + "shape":"DynamoKeyValue", + "documentation":"

The value of the range key (also called the sort key).

" + }, + "operation":{ + "shape":"DynamoOperation", + "documentation":"

The type of operation to perform. You can specify the following values:

  • INSERT - Insert data as a new item into the DynamoDB table. This item uses the specified hash key as a partition key. If you specified a range key, the item uses the range key as a sort key.

  • UPDATE - Update an existing item of the DynamoDB table with new data. This item's partition key must match the specified hash key. If you specified a range key, the range key must match the item's sort key.

  • DELETE - Delete an existing item of the DynamoDB table. This item's partition key must match the specified hash key. If you specified a range key, the range key must match the item's sort key.

If you don't specify this parameter, AWS IoT Events triggers the INSERT operation.

" + }, + "payloadField":{ + "shape":"DynamoKeyField", + "documentation":"

The name of the DynamoDB column that receives the action payload.

If you don't specify this parameter, the name of the DynamoDB column is payload.

" + }, + "tableName":{ + "shape":"DynamoTableName", + "documentation":"

The name of the DynamoDB table.

" + }, + "payload":{"shape":"Payload"} + }, + "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The standard action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. One column of the DynamoDB table receives all attribute-value pairs in the payload that you specify.

The tableName and hashKeyField values must match the table name and the partition key of the DynamoDB table.

If the DynamoDB table also has a sort key, you must specify rangeKeyField. The rangeKeyField value must match the sort key.

The hashKeyValue and rangeKeyValue use substitution templates. These templates provide data at runtime. The syntax is ${sql-expression}.

You can use expressions for parameters that are string data type. For more information, see Expressions in the AWS IoT Events Developer Guide.

If the defined payload type is a string, DynamoDBAction writes non-JSON data to the DynamoDB table as binary data. The DynamoDB console displays the data as Base64-encoded text. The payloadField is <payload-field>_raw.

" + }, + "DynamoDBv2Action":{ + "type":"structure", + "required":["tableName"], + "members":{ + "tableName":{ + "shape":"DynamoTableName", + "documentation":"

The name of the DynamoDB table.

" + }, + "payload":{"shape":"Payload"} + }, + "documentation":"

Defines an action to write to the Amazon DynamoDB table that you created. The default action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. You can also customize the payload. A separate column of the DynamoDB table receives one attribute-value pair in the payload that you specify.

The type value for Payload must be JSON.

You can use expressions for parameters that are strings. For more information, see Expressions in the AWS IoT Events Developer Guide.

" + }, + "DynamoKeyField":{"type":"string"}, + "DynamoKeyType":{"type":"string"}, + "DynamoKeyValue":{"type":"string"}, + "DynamoOperation":{"type":"string"}, + "DynamoTableName":{"type":"string"}, "EvaluationMethod":{ "type":"string", "enum":[ @@ -799,6 +947,10 @@ "separator":{ "shape":"FirehoseSeparator", "documentation":"

A character separator that is used to separate records written to the Kinesis Data Firehose delivery stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma).

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.

" } }, "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon Kinesis Data Firehose delivery stream.

" @@ -955,10 +1107,41 @@ "inputName":{ "shape":"InputName", "documentation":"

The name of the AWS IoT Events input where the data is sent.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you send a message to an AWS IoT Events input.

" } }, "documentation":"

Sends an AWS IoT Events input, passing in information about the detector model instance and the event that triggered the action.

" }, + "IotSiteWiseAction":{ + "type":"structure", + "required":["propertyValue"], + "members":{ + "entryId":{ + "shape":"AssetPropertyEntryId", + "documentation":"

A unique identifier for this entry. You can use the entry ID to track which data entry causes an error in case of failure. The default is a new unique identifier. You can also specify an expression.

" + }, + "assetId":{ + "shape":"AssetId", + "documentation":"

The ID of the asset that has the specified property. You can specify an expression.

" + }, + "propertyId":{ + "shape":"AssetPropertyId", + "documentation":"

The ID of the asset property. You can specify an expression.

" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

The alias of the asset property. You can also specify an expression.

" + }, + "propertyValue":{ + "shape":"AssetPropertyValue", + "documentation":"

The value to send to the asset property. This value contains timestamp, quality, and value (TQV) information.

" + } + }, + "documentation":"

Sends information about the detector model instance and the event that triggered the action to a specified asset property in AWS IoT SiteWise.

You must specify either propertyAlias or both assetId and propertyId to identify the target asset property in AWS IoT SiteWise.

For parameters that are string data type, you can specify the following options:

  • Use a string. For example, the propertyAlias value can be '/company/windfarm/3/turbine/7/temperature'.

  • Use an expression. For example, the propertyAlias value can be 'company/windfarm/${$input.TemperatureInput.sensorData.windfarmID}/turbine/${$input.TemperatureInput.sensorData.turbineID}/temperature'.

    For more information, see Expressions in the AWS IoT Events Developer Guide.

" + }, "IotTopicPublishAction":{ "type":"structure", "required":["mqttTopic"], @@ -966,6 +1149,10 @@ "mqttTopic":{ "shape":"MQTTTopic", "documentation":"

The MQTT topic of the message. You can use a string expression that includes variables ($variable.<variable-name>) and input values ($input.<input-name>.<path-to-datum>) as the topic string.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you publish a message to an AWS IoT Core topic.

" } }, "documentation":"

Information required to publish the MQTT message through the AWS IoT message broker.

" @@ -983,6 +1170,10 @@ "functionArn":{ "shape":"AmazonResourceName", "documentation":"

The ARN of the Lambda function that is executed.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you send a message to a Lambda function.

" } }, "documentation":"

Calls a Lambda function, passing in information about the detector model instance and the event that triggered the action.

" @@ -1169,7 +1360,7 @@ "members":{ "events":{ "shape":"Events", - "documentation":"

Specifies the actions that are performed when the state is entered and the condition is TRUE.

" + "documentation":"

Specifies the actions that are performed when the state is entered and the condition is TRUE.

" } }, "documentation":"

When entering this state, perform these actions if the condition is TRUE.

" @@ -1179,10 +1370,10 @@ "members":{ "events":{ "shape":"Events", - "documentation":"

Specifies the actions that are performed when the state is exited and the condition is TRUE.

" + "documentation":"

Specifies the actions that are performed when the state is exited and the condition is TRUE.

" } }, - "documentation":"

When exiting this state, perform these actions if the specified condition is TRUE.

" + "documentation":"

When exiting this state, perform these actions if the specified condition is TRUE.

" }, "OnInputLifecycle":{ "type":"structure", @@ -1198,6 +1389,31 @@ }, "documentation":"

Specifies the actions performed when the condition evaluates to TRUE.

" }, + "Payload":{ + "type":"structure", + "required":[ + "contentExpression", + "type" + ], + "members":{ + "contentExpression":{ + "shape":"ContentExpression", + "documentation":"

The content of the payload. You can use a string expression that includes quoted strings ('<string>'), variables ($variable.<variable-name>), input values ($input.<input-name>.<path-to-datum>), string concatenations, and quoted strings that contain ${} as the content. The recommended maximum size of a content expression is 1 KB.

" + }, + "type":{ + "shape":"PayloadType", + "documentation":"

The value of the payload type can be either STRING or JSON.

" + } + }, + "documentation":"

Information needed to configure the payload.

By default, AWS IoT Events generates a standard payload in JSON for any action. This action payload contains all attribute-value pairs that have the information about the detector model instance and the event that triggered the action. To configure the action payload, you can use contentExpression.

" + }, + "PayloadType":{ + "type":"string", + "enum":[ + "STRING", + "JSON" + ] + }, "PutLoggingOptionsRequest":{ "type":"structure", "required":["loggingOptions"], @@ -1218,7 +1434,7 @@ "documentation":"

The name of the timer to reset.

" } }, - "documentation":"

Information required to reset the timer. The timer is reset to the previously evaluated result of the duration.

" + "documentation":"

Information required to reset the timer. The timer is reset to the previously evaluated result of the duration. The duration expression isn't reevaluated when you reset the timer.

" }, "ResourceAlreadyExistsException":{ "type":"structure", @@ -1271,6 +1487,10 @@ "targetArn":{ "shape":"AmazonResourceName", "documentation":"

The ARN of the Amazon SNS target where the message is sent.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you send a message as an Amazon SNS push notification.

" } }, "documentation":"

Information required to publish the Amazon SNS message.

" @@ -1303,7 +1523,7 @@ }, "seconds":{ "shape":"Seconds", - "documentation":"

The number of seconds until the timer expires. The minimum value is 60 seconds to ensure accuracy.

", + "documentation":"

The number of seconds until the timer expires. The minimum value is 60 seconds to ensure accuracy. The maximum value is 31622400 seconds.

", "deprecated":true, "deprecatedMessage":"seconds is deprecated. You can use durationExpression for SetTimerAction. The value of seconds can be used as a string expression for durationExpression." }, @@ -1342,7 +1562,11 @@ }, "useBase64":{ "shape":"UseBase64", - "documentation":"

Set this to TRUE if you want the data to be base-64 encoded before it is written to the queue.

" + "documentation":"

Set this to TRUE if you want the data to be base-64 encoded before it is written to the queue. Otherwise, set this to FALSE.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

You can configure the action payload when you send a message to an Amazon SQS queue.

" } }, "documentation":"

Sends information about the detector model instance and the event that triggered the action to an Amazon SQS queue.

" @@ -1365,7 +1589,7 @@ }, "onExit":{ "shape":"OnExitLifecycle", - "documentation":"

When exiting this state, perform these actions if the specified condition is TRUE.

" + "documentation":"

When exiting this state, perform these actions if the specified condition is TRUE.

" } }, "documentation":"

Information that defines a state of a detector.

" @@ -1617,5 +1841,5 @@ "resourceArn":{"type":"string"}, "resourceId":{"type":"string"} }, - "documentation":"

AWS IoT Events monitors your equipment or device fleets for failures or changes in operation, and triggers actions when such events occur. You can use AWS IoT Events API commands to create, read, update, and delete inputs and detector models, and to list their versions.

" + "documentation":"

AWS IoT Events monitors your equipment or device fleets for failures or changes in operation, and triggers actions when such events occur. You can use AWS IoT Events API operations to create, read, update, and delete inputs and detector models, and to list their versions.

" } diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 2d26eab1f9e7..b0d4fa0a80da 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index f92532a0bf8b..b08f87d50eae 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index bbfcacc05a6c..5c5a731f3adb 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml new file mode 100644 index 000000000000..36d02908cb99 --- /dev/null +++ b/services/iotsitewise/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + iotsitewise + AWS Java SDK :: Services :: Io T Site Wise + The AWS Java SDK for Io T Site Wise module holds the client classes that are used for + communicating with Io T Site Wise. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.iotsitewise + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json b/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..cb6ceb445d38 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,70 @@ +{ + "pagination": { + "GetAssetPropertyAggregates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "aggregatedValues" + }, + "GetAssetPropertyValueHistory": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetPropertyValueHistory" + }, + "ListAccessPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "accessPolicySummaries" + }, + "ListAssetModels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetModelSummaries" + }, + "ListAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetSummaries" + }, + "ListAssociatedAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetSummaries" + }, + "ListDashboards": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "dashboardSummaries" + }, + "ListGateways": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "gatewaySummaries" + }, + "ListPortals": { + "input_token": "nextToken", + "output_token": 
"nextToken", + "limit_key": "maxResults", + "result_key": "portalSummaries" + }, + "ListProjectAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assetIds" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "projectSummaries" + } + } +} diff --git a/services/iotsitewise/src/main/resources/codegen-resources/service-2.json b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..b7eaf33a9ae9 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,4855 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-12-02", + "endpointPrefix":"iotsitewise", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS IoT SiteWise", + "serviceId":"IoTSiteWise", + "signatureVersion":"v4", + "signingName":"iotsitewise", + "uid":"iotsitewise-2019-12-02" + }, + "operations":{ + "AssociateAssets":{ + "name":"AssociateAssets", + "http":{ + "method":"POST", + "requestUri":"/assets/{assetId}/associate" + }, + "input":{"shape":"AssociateAssetsRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Associates a child asset with the given parent asset through a hierarchy defined in the parent asset's model. For more information, see Associating Assets in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"model."} + }, + "BatchAssociateProjectAssets":{ + "name":"BatchAssociateProjectAssets", + "http":{ + "method":"POST", + "requestUri":"/projects/{projectId}/assets/associate", + "responseCode":200 + }, + "input":{"shape":"BatchAssociateProjectAssetsRequest"}, + "output":{"shape":"BatchAssociateProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Associates a group (batch) of assets with an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "BatchDisassociateProjectAssets":{ + "name":"BatchDisassociateProjectAssets", + "http":{ + "method":"POST", + "requestUri":"/projects/{projectId}/assets/disassociate", + "responseCode":200 + }, + "input":{"shape":"BatchDisassociateProjectAssetsRequest"}, + "output":{"shape":"BatchDisassociateProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Disassociates a group (batch) of assets from an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "BatchPutAssetPropertyValue":{ + "name":"BatchPutAssetPropertyValue", + "http":{ + "method":"POST", + "requestUri":"/properties" + }, + "input":{"shape":"BatchPutAssetPropertyValueRequest"}, + "output":{"shape":"BatchPutAssetPropertyValueResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Sends a list of asset property values to AWS IoT SiteWise. Each value is a timestamp-quality-value (TQV) data point. For more information, see Ingesting Data Using the API in the AWS IoT SiteWise User Guide.

To identify an asset property, you must specify one of the following:

  • The assetId and propertyId of an asset property.

  • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.

With respect to Unix epoch time, AWS IoT SiteWise accepts only TQVs that have a timestamp of no more than 15 minutes in the past and no more than 5 minutes in the future. AWS IoT SiteWise rejects timestamps outside of the inclusive range of [-15, +5] minutes and returns a TimestampOutOfRangeException error.

For each asset property, AWS IoT SiteWise overwrites TQVs with duplicate timestamps unless the newer TQV has a different quality. For example, if you store a TQV {T1, GOOD, V1}, then storing {T1, GOOD, V2} replaces the existing TQV.

", + "endpoint":{"hostPrefix":"data."} + }, + "CreateAccessPolicy":{ + "name":"CreateAccessPolicy", + "http":{ + "method":"POST", + "requestUri":"/access-policies", + "responseCode":201 + }, + "input":{"shape":"CreateAccessPolicyRequest"}, + "output":{"shape":"CreateAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates an access policy that grants the specified AWS Single Sign-On user or group access to the specified AWS IoT SiteWise Monitor portal or project resource.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateAsset":{ + "name":"CreateAsset", + "http":{ + "method":"POST", + "requestUri":"/assets", + "responseCode":202 + }, + "input":{"shape":"CreateAssetRequest"}, + "output":{"shape":"CreateAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Creates an asset from an existing asset model. For more information, see Creating Assets in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"model."} + }, + "CreateAssetModel":{ + "name":"CreateAssetModel", + "http":{ + "method":"POST", + "requestUri":"/asset-models", + "responseCode":202 + }, + "input":{"shape":"CreateAssetModelRequest"}, + "output":{"shape":"CreateAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Creates an asset model from specified property and hierarchy definitions. You create assets from asset models. With asset models, you can easily create assets of the same type that have standardized definitions. Each asset created from a model inherits the asset model's property and hierarchy definitions. For more information, see Defining Asset Models in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"model."} + }, + "CreateDashboard":{ + "name":"CreateDashboard", + "http":{ + "method":"POST", + "requestUri":"/dashboards", + "responseCode":201 + }, + "input":{"shape":"CreateDashboardRequest"}, + "output":{"shape":"CreateDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a dashboard in an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateGateway":{ + "name":"CreateGateway", + "http":{ + "method":"POST", + "requestUri":"/20200301/gateways", + "responseCode":201 + }, + "input":{"shape":"CreateGatewayRequest"}, + "output":{"shape":"CreateGatewayResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a gateway, which is a virtual or edge device that delivers industrial data streams from local servers to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"edge."} + }, + "CreatePortal":{ + "name":"CreatePortal", + "http":{ + "method":"POST", + "requestUri":"/portals", + "responseCode":202 + }, + "input":{"shape":"CreatePortalRequest"}, + "output":{"shape":"CreatePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a portal, which can contain projects and dashboards. Before you can create a portal, you must configure AWS Single Sign-On in the current Region. AWS IoT SiteWise Monitor uses AWS SSO to manage user permissions. For more information, see Enabling AWS SSO in the AWS IoT SiteWise User Guide.

Before you can sign in to a new portal, you must add at least one AWS SSO user or group to that portal. For more information, see Adding or Removing Portal Administrators in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "CreateProject":{ + "name":"CreateProject", + "http":{ + "method":"POST", + "requestUri":"/projects", + "responseCode":201 + }, + "input":{"shape":"CreateProjectRequest"}, + "output":{"shape":"CreateProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a project in the specified portal.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteAccessPolicy":{ + "name":"DeleteAccessPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAccessPolicyRequest"}, + "output":{"shape":"DeleteAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an access policy that grants the specified AWS Single Sign-On identity access to the specified AWS IoT SiteWise Monitor resource. You can use this operation to revoke access to an AWS IoT SiteWise Monitor resource.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteAsset":{ + "name":"DeleteAsset", + "http":{ + "method":"DELETE", + "requestUri":"/assets/{assetId}", + "responseCode":202 + }, + "input":{"shape":"DeleteAssetRequest"}, + "output":{"shape":"DeleteAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Deletes an asset. This action can't be undone. For more information, see Deleting Assets and Models in the AWS IoT SiteWise User Guide.

You can't delete an asset that's associated to another asset. For more information, see DisassociateAssets.

", + "endpoint":{"hostPrefix":"model."} + }, + "DeleteAssetModel":{ + "name":"DeleteAssetModel", + "http":{ + "method":"DELETE", + "requestUri":"/asset-models/{assetModelId}", + "responseCode":202 + }, + "input":{"shape":"DeleteAssetModelRequest"}, + "output":{"shape":"DeleteAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Deletes an asset model. This action can't be undone. You must delete all assets created from an asset model before you can delete the model. Also, you can't delete an asset model if a parent asset model exists that contains a property formula expression that depends on the asset model that you want to delete. For more information, see Deleting Assets and Models in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"model."} + }, + "DeleteDashboard":{ + "name":"DeleteDashboard", + "http":{ + "method":"DELETE", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":204 + }, + "input":{"shape":"DeleteDashboardRequest"}, + "output":{"shape":"DeleteDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes a dashboard from AWS IoT SiteWise Monitor.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteGateway":{ + "name":"DeleteGateway", + "http":{ + "method":"DELETE", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"DeleteGatewayRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes a gateway from AWS IoT SiteWise. When you delete a gateway, some of the gateway's files remain in your gateway's file system. For more information, see Data retention in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"edge."} + }, + "DeletePortal":{ + "name":"DeletePortal", + "http":{ + "method":"DELETE", + "requestUri":"/portals/{portalId}", + "responseCode":202 + }, + "input":{"shape":"DeletePortalRequest"}, + "output":{"shape":"DeletePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Deletes a portal from AWS IoT SiteWise Monitor.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DeleteProject":{ + "name":"DeleteProject", + "http":{ + "method":"DELETE", + "requestUri":"/projects/{projectId}", + "responseCode":204 + }, + "input":{"shape":"DeleteProjectRequest"}, + "output":{"shape":"DeleteProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes a project from AWS IoT SiteWise Monitor.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeAccessPolicy":{ + "name":"DescribeAccessPolicy", + "http":{ + "method":"GET", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":200 + }, + "input":{"shape":"DescribeAccessPolicyRequest"}, + "output":{"shape":"DescribeAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Describes an access policy, which specifies an AWS SSO user or group's access to an AWS IoT SiteWise Monitor portal or project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeAsset":{ + "name":"DescribeAsset", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}" + }, + "input":{"shape":"DescribeAssetRequest"}, + "output":{"shape":"DescribeAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about an asset.

", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeAssetModel":{ + "name":"DescribeAssetModel", + "http":{ + "method":"GET", + "requestUri":"/asset-models/{assetModelId}" + }, + "input":{"shape":"DescribeAssetModelRequest"}, + "output":{"shape":"DescribeAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about an asset model.

", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeAssetProperty":{ + "name":"DescribeAssetProperty", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}/properties/{propertyId}" + }, + "input":{"shape":"DescribeAssetPropertyRequest"}, + "output":{"shape":"DescribeAssetPropertyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about an asset's property.

", + "endpoint":{"hostPrefix":"model."} + }, + "DescribeDashboard":{ + "name":"DescribeDashboard", + "http":{ + "method":"GET", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":200 + }, + "input":{"shape":"DescribeDashboardRequest"}, + "output":{"shape":"DescribeDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a dashboard.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeGateway":{ + "name":"DescribeGateway", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"DescribeGatewayRequest"}, + "output":{"shape":"DescribeGatewayResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a gateway.

", + "endpoint":{"hostPrefix":"edge."} + }, + "DescribeGatewayCapabilityConfiguration":{ + "name":"DescribeGatewayCapabilityConfiguration", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways/{gatewayId}/capability/{capabilityNamespace}" + }, + "input":{"shape":"DescribeGatewayCapabilityConfigurationRequest"}, + "output":{"shape":"DescribeGatewayCapabilityConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a gateway capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

", + "endpoint":{"hostPrefix":"edge."} + }, + "DescribeLoggingOptions":{ + "name":"DescribeLoggingOptions", + "http":{ + "method":"GET", + "requestUri":"/logging" + }, + "input":{"shape":"DescribeLoggingOptionsRequest"}, + "output":{"shape":"DescribeLoggingOptionsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the current AWS IoT SiteWise logging options.

", + "endpoint":{"hostPrefix":"model."} + }, + "DescribePortal":{ + "name":"DescribePortal", + "http":{ + "method":"GET", + "requestUri":"/portals/{portalId}", + "responseCode":200 + }, + "input":{"shape":"DescribePortalRequest"}, + "output":{"shape":"DescribePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a portal.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DescribeProject":{ + "name":"DescribeProject", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectId}", + "responseCode":200 + }, + "input":{"shape":"DescribeProjectRequest"}, + "output":{"shape":"DescribeProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "DisassociateAssets":{ + "name":"DisassociateAssets", + "http":{ + "method":"POST", + "requestUri":"/assets/{assetId}/disassociate" + }, + "input":{"shape":"DisassociateAssetsRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Disassociates a child asset from the given parent asset through a hierarchy defined in the parent asset's model.

", + "endpoint":{"hostPrefix":"model."} + }, + "GetAssetPropertyAggregates":{ + "name":"GetAssetPropertyAggregates", + "http":{ + "method":"GET", + "requestUri":"/properties/aggregates" + }, + "input":{"shape":"GetAssetPropertyAggregatesRequest"}, + "output":{"shape":"GetAssetPropertyAggregatesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Gets aggregated values for an asset property. For more information, see Querying Aggregated Property Values in the AWS IoT SiteWise User Guide.

To identify an asset property, you must specify one of the following:

  • The assetId and propertyId of an asset property.

  • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.

", + "endpoint":{"hostPrefix":"data."} + }, + "GetAssetPropertyValue":{ + "name":"GetAssetPropertyValue", + "http":{ + "method":"GET", + "requestUri":"/properties/latest" + }, + "input":{"shape":"GetAssetPropertyValueRequest"}, + "output":{"shape":"GetAssetPropertyValueResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Gets an asset property's current value. For more information, see Querying Current Property Values in the AWS IoT SiteWise User Guide.

To identify an asset property, you must specify one of the following:

  • The assetId and propertyId of an asset property.

  • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.

", + "endpoint":{"hostPrefix":"data."} + }, + "GetAssetPropertyValueHistory":{ + "name":"GetAssetPropertyValueHistory", + "http":{ + "method":"GET", + "requestUri":"/properties/history" + }, + "input":{"shape":"GetAssetPropertyValueHistoryRequest"}, + "output":{"shape":"GetAssetPropertyValueHistoryResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Gets the history of an asset property's values. For more information, see Querying Historical Property Values in the AWS IoT SiteWise User Guide.

To identify an asset property, you must specify one of the following:

  • The assetId and propertyId of an asset property.

  • A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty.

", + "endpoint":{"hostPrefix":"data."} + }, + "ListAccessPolicies":{ + "name":"ListAccessPolicies", + "http":{ + "method":"GET", + "requestUri":"/access-policies", + "responseCode":200 + }, + "input":{"shape":"ListAccessPoliciesRequest"}, + "output":{"shape":"ListAccessPoliciesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of access policies for an AWS SSO identity (a user or group) or an AWS IoT SiteWise Monitor resource (a portal or project).

", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListAssetModels":{ + "name":"ListAssetModels", + "http":{ + "method":"GET", + "requestUri":"/asset-models" + }, + "input":{"shape":"ListAssetModelsRequest"}, + "output":{"shape":"ListAssetModelsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of summaries of all asset models.

", + "endpoint":{"hostPrefix":"model."} + }, + "ListAssets":{ + "name":"ListAssets", + "http":{ + "method":"GET", + "requestUri":"/assets" + }, + "input":{"shape":"ListAssetsRequest"}, + "output":{"shape":"ListAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of asset summaries.

You can use this operation to do the following:

  • List assets based on a specific asset model.

  • List top-level assets.

You can't use this operation to list all assets. To retrieve summaries for all of your assets, use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all assets for each asset model.

", + "endpoint":{"hostPrefix":"model."} + }, + "ListAssociatedAssets":{ + "name":"ListAssociatedAssets", + "http":{ + "method":"GET", + "requestUri":"/assets/{assetId}/hierarchies" + }, + "input":{"shape":"ListAssociatedAssetsRequest"}, + "output":{"shape":"ListAssociatedAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of the assets associated to a parent asset (assetId) by a given hierarchy (hierarchyId).

", + "endpoint":{"hostPrefix":"model."} + }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"GET", + "requestUri":"/dashboards", + "responseCode":200 + }, + "input":{"shape":"ListDashboardsRequest"}, + "output":{"shape":"ListDashboardsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of dashboards for an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListGateways":{ + "name":"ListGateways", + "http":{ + "method":"GET", + "requestUri":"/20200301/gateways" + }, + "input":{"shape":"ListGatewaysRequest"}, + "output":{"shape":"ListGatewaysResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of gateways.

", + "endpoint":{"hostPrefix":"edge."} + }, + "ListPortals":{ + "name":"ListPortals", + "http":{ + "method":"GET", + "requestUri":"/portals", + "responseCode":200 + }, + "input":{"shape":"ListPortalsRequest"}, + "output":{"shape":"ListPortalsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of AWS IoT SiteWise Monitor portals.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListProjectAssets":{ + "name":"ListProjectAssets", + "http":{ + "method":"GET", + "requestUri":"/projects/{projectId}/assets", + "responseCode":200 + }, + "input":{"shape":"ListProjectAssetsRequest"}, + "output":{"shape":"ListProjectAssetsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of assets associated with an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListProjects":{ + "name":"ListProjects", + "http":{ + "method":"GET", + "requestUri":"/projects", + "responseCode":200 + }, + "input":{"shape":"ListProjectsRequest"}, + "output":{"shape":"ListProjectsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of projects for an AWS IoT SiteWise Monitor portal.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the list of tags for an AWS IoT SiteWise resource.

" + }, + "PutLoggingOptions":{ + "name":"PutLoggingOptions", + "http":{ + "method":"PUT", + "requestUri":"/logging" + }, + "input":{"shape":"PutLoggingOptionsRequest"}, + "output":{"shape":"PutLoggingOptionsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Sets logging options for AWS IoT SiteWise.

", + "endpoint":{"hostPrefix":"model."} + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

Adds tags to an AWS IoT SiteWise resource. If a tag already exists for the resource, this operation updates the tag's value.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes a tag from an AWS IoT SiteWise resource.

" + }, + "UpdateAccessPolicy":{ + "name":"UpdateAccessPolicy", + "http":{ + "method":"PUT", + "requestUri":"/access-policies/{accessPolicyId}", + "responseCode":200 + }, + "input":{"shape":"UpdateAccessPolicyRequest"}, + "output":{"shape":"UpdateAccessPolicyResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates an existing access policy that specifies an AWS SSO user or group's access to an AWS IoT SiteWise Monitor portal or project resource.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateAsset":{ + "name":"UpdateAsset", + "http":{ + "method":"PUT", + "requestUri":"/assets/{assetId}", + "responseCode":202 + }, + "input":{"shape":"UpdateAssetRequest"}, + "output":{"shape":"UpdateAssetResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Updates an asset's name. For more information, see Updating Assets and Models in the AWS IoT SiteWise User Guide.

", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateAssetModel":{ + "name":"UpdateAssetModel", + "http":{ + "method":"PUT", + "requestUri":"/asset-models/{assetModelId}", + "responseCode":202 + }, + "input":{"shape":"UpdateAssetModelRequest"}, + "output":{"shape":"UpdateAssetModelResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating Assets and Models in the AWS IoT SiteWise User Guide.

This operation overwrites the existing model with the provided model. To avoid deleting your asset model's properties or hierarchies, you must include their IDs and definitions in the updated asset model payload. For more information, see DescribeAssetModel.

If you remove a property from an asset model or update a property's formula expression, AWS IoT SiteWise deletes all previous data for that property. If you remove a hierarchy definition from an asset model, AWS IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the type or data type of an existing property.

", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateAssetProperty":{ + "name":"UpdateAssetProperty", + "http":{ + "method":"PUT", + "requestUri":"/assets/{assetId}/properties/{propertyId}" + }, + "input":{"shape":"UpdateAssetPropertyRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Updates an asset property's alias and notification state.

This operation overwrites the property's existing alias and notification state. To keep your existing property's alias or notification state, you must include the existing values in the UpdateAssetProperty request. For more information, see DescribeAssetProperty.

", + "endpoint":{"hostPrefix":"model."} + }, + "UpdateDashboard":{ + "name":"UpdateDashboard", + "http":{ + "method":"PUT", + "requestUri":"/dashboards/{dashboardId}", + "responseCode":200 + }, + "input":{"shape":"UpdateDashboardRequest"}, + "output":{"shape":"UpdateDashboardResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates an AWS IoT SiteWise Monitor dashboard.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateGateway":{ + "name":"UpdateGateway", + "http":{ + "method":"PUT", + "requestUri":"/20200301/gateways/{gatewayId}" + }, + "input":{"shape":"UpdateGatewayRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates a gateway's name.

", + "endpoint":{"hostPrefix":"edge."} + }, + "UpdateGatewayCapabilityConfiguration":{ + "name":"UpdateGatewayCapabilityConfiguration", + "http":{ + "method":"POST", + "requestUri":"/20200301/gateways/{gatewayId}/capability", + "responseCode":201 + }, + "input":{"shape":"UpdateGatewayCapabilityConfigurationRequest"}, + "output":{"shape":"UpdateGatewayCapabilityConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictingOperationException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates a gateway capability configuration or defines a new capability configuration. Each gateway capability defines data sources for a gateway. A capability configuration can contain multiple data source configurations. If you define OPC-UA sources for a gateway in the AWS IoT SiteWise console, all of your OPC-UA sources are stored in one capability configuration. To list all capability configurations for a gateway, use DescribeGateway.

", + "endpoint":{"hostPrefix":"edge."} + }, + "UpdatePortal":{ + "name":"UpdatePortal", + "http":{ + "method":"PUT", + "requestUri":"/portals/{portalId}", + "responseCode":202 + }, + "input":{"shape":"UpdatePortalRequest"}, + "output":{"shape":"UpdatePortalResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Updates an AWS IoT SiteWise Monitor portal.

", + "endpoint":{"hostPrefix":"monitor."} + }, + "UpdateProject":{ + "name":"UpdateProject", + "http":{ + "method":"PUT", + "requestUri":"/projects/{projectId}", + "responseCode":200 + }, + "input":{"shape":"UpdateProjectRequest"}, + "output":{"shape":"UpdateProjectResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates an AWS IoT SiteWise Monitor project.

", + "endpoint":{"hostPrefix":"monitor."} + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "max":1600, + "min":1, + "pattern":".*" + }, + "AccessPolicySummaries":{ + "type":"list", + "member":{"shape":"AccessPolicySummary"} + }, + "AccessPolicySummary":{ + "type":"structure", + "required":[ + "id", + "identity", + "resource", + "permission" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the access policy.

" + }, + "identity":{ + "shape":"Identity", + "documentation":"

The AWS SSO identity (a user or group).

" + }, + "resource":{ + "shape":"Resource", + "documentation":"

The AWS IoT SiteWise Monitor resource (a portal or project).

" + }, + "permission":{ + "shape":"Permission", + "documentation":"

The permissions for the access policy. Note that a project ADMINISTRATOR is also known as a project owner.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the access policy was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the access policy was last updated, in Unix epoch time.

" + } + }, + "documentation":"

Contains an access policy that defines an AWS SSO identity's access to an AWS IoT SiteWise Monitor resource.

" + }, + "AggregateType":{ + "type":"string", + "enum":[ + "AVERAGE", + "COUNT", + "MAXIMUM", + "MINIMUM", + "SUM", + "STANDARD_DEVIATION" + ] + }, + "AggregateTypes":{ + "type":"list", + "member":{"shape":"AggregateType"}, + "min":1 + }, + "AggregatedDoubleValue":{"type":"double"}, + "AggregatedValue":{ + "type":"structure", + "required":[ + "timestamp", + "value" + ], + "members":{ + "timestamp":{ + "shape":"Timestamp", + "documentation":"

The date the aggregating computations occurred, in Unix epoch time.

" + }, + "quality":{ + "shape":"Quality", + "documentation":"

The quality of the aggregated data.

" + }, + "value":{ + "shape":"Aggregates", + "documentation":"

The value of the aggregates.

" + } + }, + "documentation":"

Contains aggregated asset property values (for example, average, minimum, and maximum).

" + }, + "AggregatedValues":{ + "type":"list", + "member":{"shape":"AggregatedValue"} + }, + "Aggregates":{ + "type":"structure", + "members":{ + "average":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The average (mean) value of the time series over a time interval window.

" + }, + "count":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The count of data points in the time series over a time interval window.

" + }, + "maximum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The maximum value of the time series over a time interval window.

" + }, + "minimum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The minimum value of the time series over a time interval window.

" + }, + "sum":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The sum of the time series over a time interval window.

" + }, + "standardDeviation":{ + "shape":"AggregatedDoubleValue", + "documentation":"

The standard deviation of the time series over a time interval window.

" + } + }, + "documentation":"

Contains the (pre-calculated) aggregate values for an asset property.

" + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, + "AssetErrorCode":{ + "type":"string", + "enum":["INTERNAL_FAILURE"] + }, + "AssetErrorDetails":{ + "type":"structure", + "required":[ + "assetId", + "code", + "message" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

" + }, + "code":{ + "shape":"AssetErrorCode", + "documentation":"

The error code.

" + }, + "message":{ + "shape":"AssetErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

Contains error details for the requested associate project asset action.

" + }, + "AssetErrorMessage":{"type":"string"}, + "AssetHierarchies":{ + "type":"list", + "member":{"shape":"AssetHierarchy"} + }, + "AssetHierarchy":{ + "type":"structure", + "required":["name"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the hierarchy. This ID is a hierarchyId.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The hierarchy name provided in the CreateAssetModel or UpdateAssetModel API.

" + } + }, + "documentation":"

Describes an asset hierarchy that contains a hierarchy's name and ID.

" + }, + "AssetIDs":{ + "type":"list", + "member":{"shape":"ID"} + }, + "AssetModelHierarchies":{ + "type":"list", + "member":{"shape":"AssetModelHierarchy"} + }, + "AssetModelHierarchy":{ + "type":"structure", + "required":[ + "name", + "childAssetModelId" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset model hierarchy. This ID is a hierarchyId.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the asset model hierarchy that you specify by using the CreateAssetModel or UpdateAssetModel API.

" + }, + "childAssetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model. All assets in this hierarchy must be instances of the childAssetModelId asset model.

" + } + }, + "documentation":"

Describes an asset hierarchy that contains a hierarchy's name, ID, and child asset model ID that specifies the type of asset that can be in this hierarchy.

" + }, + "AssetModelHierarchyDefinition":{ + "type":"structure", + "required":[ + "name", + "childAssetModelId" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the asset model hierarchy definition (as specified in CreateAssetModel or UpdateAssetModel).

" + }, + "childAssetModelId":{ + "shape":"ID", + "documentation":"

The ID of an asset model for this hierarchy.

" + } + }, + "documentation":"

Contains an asset model hierarchy used in asset model creation. An asset model hierarchy determines the kind (or type) of asset that can belong to a hierarchy.

" + }, + "AssetModelHierarchyDefinitions":{ + "type":"list", + "member":{"shape":"AssetModelHierarchyDefinition"} + }, + "AssetModelProperties":{ + "type":"list", + "member":{"shape":"AssetModelProperty"} + }, + "AssetModelProperty":{ + "type":"structure", + "required":[ + "name", + "dataType", + "type" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset model property.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the asset model property.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The data type of the asset model property.

" + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

The unit of the asset model property, such as Newtons or RPM.

" + }, + "type":{ + "shape":"PropertyType", + "documentation":"

The property type (see PropertyType).

" + } + }, + "documentation":"

Contains information about an asset model property.

" + }, + "AssetModelPropertyDefinition":{ + "type":"structure", + "required":[ + "name", + "dataType", + "type" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the property definition.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The data type of the property definition.

" + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

The unit of the property definition, such as Newtons or RPM.

" + }, + "type":{ + "shape":"PropertyType", + "documentation":"

The property definition type (see PropertyType). You can only specify one type in a property definition.

" + } + }, + "documentation":"

Contains an asset model property definition. This property definition is applied to all assets created from the asset model.

" + }, + "AssetModelPropertyDefinitions":{ + "type":"list", + "member":{"shape":"AssetModelPropertyDefinition"} + }, + "AssetModelState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "PROPAGATING", + "DELETING", + "FAILED" + ] + }, + "AssetModelStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"AssetModelState", + "documentation":"

The current state of the asset model.

" + }, + "error":{ + "shape":"ErrorDetails", + "documentation":"

Contains associated error information, if any.

" + } + }, + "documentation":"

Contains current status information for an asset model. For more information, see Asset and Model States in the AWS IoT SiteWise User Guide.

" + }, + "AssetModelSummaries":{ + "type":"list", + "member":{"shape":"AssetModelSummary"} + }, + "AssetModelSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "description", + "creationDate", + "lastUpdateDate", + "status" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset model (used with AWS IoT SiteWise APIs).

" + }, + "arn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the asset model.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The asset model description.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset model was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset model was last updated, in Unix epoch time.

" + }, + "status":{ + "shape":"AssetModelStatus", + "documentation":"

The current status of the asset model.

" + } + }, + "documentation":"

Contains a summary of an asset model.

" + }, + "AssetProperties":{ + "type":"list", + "member":{"shape":"AssetProperty"} + }, + "AssetProperty":{ + "type":"structure", + "required":[ + "id", + "name", + "dataType" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the property.

" + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

" + }, + "notification":{ + "shape":"PropertyNotification", + "documentation":"

The asset property's notification topic and state. For more information, see UpdateAssetProperty.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The data type of the asset property.

" + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

The unit (such as Newtons or RPM) of the asset property.

" + } + }, + "documentation":"

Contains asset property information.

" + }, + "AssetPropertyAlias":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "AssetPropertyValue":{ + "type":"structure", + "required":[ + "value", + "timestamp" + ], + "members":{ + "value":{ + "shape":"Variant", + "documentation":"

The value of the asset property (see Variant).

" + }, + "timestamp":{ + "shape":"TimeInNanos", + "documentation":"

The timestamp of the asset property value.

" + }, + "quality":{ + "shape":"Quality", + "documentation":"

The quality of the asset property value.

" + } + }, + "documentation":"

Contains asset property value information.

" + }, + "AssetPropertyValueHistory":{ + "type":"list", + "member":{"shape":"AssetPropertyValue"} + }, + "AssetPropertyValues":{ + "type":"list", + "member":{"shape":"AssetPropertyValue"} + }, + "AssetState":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "FAILED" + ] + }, + "AssetStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"AssetState", + "documentation":"

The current status of the asset.

" + }, + "error":{ + "shape":"ErrorDetails", + "documentation":"

Contains associated error information, if any.

" + } + }, + "documentation":"

Contains information about the current status of an asset. For more information, see Asset and Model States in the AWS IoT SiteWise User Guide.

" + }, + "AssetSummaries":{ + "type":"list", + "member":{"shape":"AssetSummary"} + }, + "AssetSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "assetModelId", + "creationDate", + "lastUpdateDate", + "status", + "hierarchies" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset.

" + }, + "arn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the asset.

" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model used to create this asset.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was last updated, in Unix epoch time.

" + }, + "status":{ + "shape":"AssetStatus", + "documentation":"

The current status of the asset.

" + }, + "hierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

" + } + }, + "documentation":"

Contains a summary of an asset.

" + }, + "AssociateAssetsRequest":{ + "type":"structure", + "required":[ + "assetId", + "hierarchyId", + "childAssetId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the parent asset.

", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

The ID of a hierarchy in the parent asset's model. Hierarchies allow different groupings of assets to be formed that all come from the same asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

" + }, + "childAssetId":{ + "shape":"ID", + "documentation":"

The ID of the child asset to be associated.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "AssociatedAssetsSummaries":{ + "type":"list", + "member":{"shape":"AssociatedAssetsSummary"} + }, + "AssociatedAssetsSummary":{ + "type":"structure", + "required":[ + "id", + "arn", + "name", + "assetModelId", + "creationDate", + "lastUpdateDate", + "status", + "hierarchies" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset.

" + }, + "arn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the asset.

" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model used to create the asset.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was last updated, in Unix epoch time.

" + }, + "status":{ + "shape":"AssetStatus", + "documentation":"

The current status of the asset.

" + }, + "hierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

" + } + }, + "documentation":"

Contains a summary of an associated asset.

" + }, + "Attribute":{ + "type":"structure", + "members":{ + "defaultValue":{ + "shape":"DefaultValue", + "documentation":"

The default value of the asset model property attribute. All assets that you create from the asset model contain this attribute value. You can update an attribute's value after you create an asset. For more information, see Updating Attribute Values in the AWS IoT SiteWise User Guide.

" + } + }, + "documentation":"

Contains an asset attribute property. For more information, see Attributes in the AWS IoT SiteWise User Guide.

" + }, + "BatchAssociateProjectAssetsErrors":{ + "type":"list", + "member":{"shape":"AssetErrorDetails"} + }, + "BatchAssociateProjectAssetsRequest":{ + "type":"structure", + "required":[ + "projectId", + "assetIds" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project to which to associate the assets.

", + "location":"uri", + "locationName":"projectId" + }, + "assetIds":{ + "shape":"IDs", + "documentation":"

The IDs of the assets to be associated to the project.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "BatchAssociateProjectAssetsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchAssociateProjectAssetsErrors", + "documentation":"

A list of associated error information, if any.

" + } + } + }, + "BatchDisassociateProjectAssetsErrors":{ + "type":"list", + "member":{"shape":"AssetErrorDetails"} + }, + "BatchDisassociateProjectAssetsRequest":{ + "type":"structure", + "required":[ + "projectId", + "assetIds" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project from which to disassociate the assets.

", + "location":"uri", + "locationName":"projectId" + }, + "assetIds":{ + "shape":"IDs", + "documentation":"

The IDs of the assets to be disassociated from the project.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "BatchDisassociateProjectAssetsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"BatchDisassociateProjectAssetsErrors", + "documentation":"

A list of associated error information, if any.

" + } + } + }, + "BatchPutAssetPropertyError":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "timestamps" + ], + "members":{ + "errorCode":{ + "shape":"BatchPutAssetPropertyValueErrorCode", + "documentation":"

The error code.

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The associated error message.

" + }, + "timestamps":{ + "shape":"Timestamps", + "documentation":"

A list of timestamps for each error, if any.

" + } + }, + "documentation":"

Contains error information from updating a batch of asset property values.

" + }, + "BatchPutAssetPropertyErrorEntries":{ + "type":"list", + "member":{"shape":"BatchPutAssetPropertyErrorEntry"} + }, + "BatchPutAssetPropertyErrorEntry":{ + "type":"structure", + "required":[ + "entryId", + "errors" + ], + "members":{ + "entryId":{ + "shape":"EntryId", + "documentation":"

The ID of the failed entry.

" + }, + "errors":{ + "shape":"BatchPutAssetPropertyErrors", + "documentation":"

The list of update property value errors.

" + } + }, + "documentation":"

Contains error information for asset property value entries that are associated with the BatchPutAssetPropertyValue API.

" + }, + "BatchPutAssetPropertyErrors":{ + "type":"list", + "member":{"shape":"BatchPutAssetPropertyError"} + }, + "BatchPutAssetPropertyValueErrorCode":{ + "type":"string", + "enum":[ + "ResourceNotFoundException", + "InvalidRequestException", + "InternalFailureException", + "ServiceUnavailableException", + "ThrottlingException", + "LimitExceededException", + "ConflictingOperationException", + "TimestampOutOfRangeException", + "AccessDeniedException" + ] + }, + "BatchPutAssetPropertyValueRequest":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"PutAssetPropertyValueEntries", + "documentation":"

The list of asset property value entries for the batch put request. You can specify up to 10 entries per request.

" + } + } + }, + "BatchPutAssetPropertyValueResponse":{ + "type":"structure", + "required":["errorEntries"], + "members":{ + "errorEntries":{ + "shape":"BatchPutAssetPropertyErrorEntries", + "documentation":"

A list of the errors (if any) associated with the batch put request. Each error entry contains the entryId of the entry that failed.

" + } + } + }, + "CapabilityConfiguration":{ + "type":"string", + "max":204800, + "min":1 + }, + "CapabilityNamespace":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$" + }, + "CapabilitySyncStatus":{ + "type":"string", + "enum":[ + "IN_SYNC", + "OUT_OF_SYNC", + "SYNC_FAILED" + ] + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":36, + "pattern":"\\S{36,64}" + }, + "ConflictingOperationException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceArn" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the resource that conflicts with this operation.

" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource that conflicts with this operation.

" + } + }, + "documentation":"

Your request has conflicting operations. This can occur if you're trying to perform more than one operation on the same resource at the same time.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission" + ], + "members":{ + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

The identity for this access policy. Choose either a user or a group but not both.

" + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

The AWS IoT SiteWise Monitor resource for this access policy. Choose either portal or project but not both.

" + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

The permission level for this access policy. Note that a project ADMINISTRATOR is also known as a project owner.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the access policy. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateAccessPolicyResponse":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyArn" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

The ID of the access policy.

" + }, + "accessPolicyArn":{ + "shape":"ARN", + "documentation":"

The ARN of the access policy, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

" + } + } + }, + "CreateAssetModelRequest":{ + "type":"structure", + "required":["assetModelName"], + "members":{ + "assetModelName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the asset model.

" + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

A description for the asset model.

" + }, + "assetModelProperties":{ + "shape":"AssetModelPropertyDefinitions", + "documentation":"

The property definitions of the asset model. For more information, see Asset Properties in the AWS IoT SiteWise User Guide.

You can specify up to 200 properties per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchyDefinitions", + "documentation":"

The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

You can specify up to 10 hierarchies per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the asset model. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateAssetModelResponse":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelArn", + "assetModelStatus" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model. You can use this ID when you call other AWS IoT SiteWise APIs.

" + }, + "assetModelArn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

" + }, + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

The status of the asset model, which contains a state (CREATING after successfully calling this operation) and any error message.

" + } + } + }, + "CreateAssetRequest":{ + "type":"structure", + "required":[ + "assetName", + "assetModelId" + ], + "members":{ + "assetName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the asset.

" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model from which to create the asset.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the asset. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateAssetResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetArn", + "assetStatus" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset. This ID uniquely identifies the asset within AWS IoT SiteWise and can be used with other AWS IoT SiteWise APIs.

" + }, + "assetArn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

" + }, + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

The status of the asset, which contains a state (CREATING after successfully calling this operation) and any error message.

" + } + } + }, + "CreateDashboardRequest":{ + "type":"structure", + "required":[ + "projectId", + "dashboardName", + "dashboardDefinition" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project in which to create the dashboard.

" + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

A friendly name for the dashboard.

" + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

A description for the dashboard.

" + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

The dashboard definition specified in a JSON literal. For detailed information, see Creating Dashboards (CLI) in the AWS IoT SiteWise User Guide.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the dashboard. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateDashboardResponse":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardArn" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

The ID of the dashboard.

" + }, + "dashboardArn":{ + "shape":"ARN", + "documentation":"

The ARN of the dashboard, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

" + } + } + }, + "CreateGatewayRequest":{ + "type":"structure", + "required":[ + "gatewayName", + "gatewayPlatform" + ], + "members":{ + "gatewayName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the gateway.

" + }, + "gatewayPlatform":{ + "shape":"GatewayPlatform", + "documentation":"

The gateway's platform. You can only specify one platform in a gateway.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the gateway. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateGatewayResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayArn" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway device. You can use this ID when you call other AWS IoT SiteWise APIs.

" + }, + "gatewayArn":{ + "shape":"ARN", + "documentation":"

The ARN of the gateway, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

" + } + } + }, + "CreatePortalRequest":{ + "type":"structure", + "required":[ + "portalName", + "portalContactEmail", + "roleArn" + ], + "members":{ + "portalName":{ + "shape":"Name", + "documentation":"

A friendly name for the portal.

" + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

A description for the portal.

" + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

The AWS administrator's contact email address.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "portalLogoImageFile":{ + "shape":"ImageFile", + "documentation":"

A logo image to display in the portal. Upload a square, high-resolution image. The image is displayed on a dark background.

" + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the portal. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreatePortalResponse":{ + "type":"structure", + "required":[ + "portalId", + "portalArn", + "portalStartUrl", + "portalStatus", + "ssoApplicationId" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the created portal.

" + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

The ARN of the portal, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}

" + }, + "portalStartUrl":{ + "shape":"Url", + "documentation":"

The public URL for the AWS IoT SiteWise Monitor portal.

" + }, + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

The status of the portal, which contains a state (CREATING after successfully calling this operation) and any error message.

" + }, + "ssoApplicationId":{ + "shape":"SSOApplicationId", + "documentation":"

The associated AWS SSO application ID.

" + } + } + }, + "CreateProjectRequest":{ + "type":"structure", + "required":[ + "portalId", + "projectName" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal in which to create the project.

" + }, + "projectName":{ + "shape":"Name", + "documentation":"

A friendly name for the project.

" + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

A description for the project.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the project. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "CreateProjectResponse":{ + "type":"structure", + "required":[ + "projectId", + "projectArn" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

" + }, + "projectArn":{ + "shape":"ARN", + "documentation":"

The ARN of the project, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:project/${ProjectId}

" + } + } + }, + "DashboardDefinition":{ + "type":"string", + "max":204800, + "min":0, + "pattern":".+" + }, + "DashboardSummaries":{ + "type":"list", + "member":{"shape":"DashboardSummary"} + }, + "DashboardSummary":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the dashboard.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the dashboard.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The dashboard's description.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the dashboard was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the dashboard was last updated, in Unix epoch time.

" + } + }, + "documentation":"

Contains a dashboard summary.

" + }, + "DefaultValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "DeleteAccessPolicyRequest":{ + "type":"structure", + "required":["accessPolicyId"], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

The ID of the access policy to be deleted.

", + "location":"uri", + "locationName":"accessPolicyId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAccessPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssetModelRequest":{ + "type":"structure", + "required":["assetModelId"], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model to delete.

", + "location":"uri", + "locationName":"assetModelId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAssetModelResponse":{ + "type":"structure", + "required":["assetModelStatus"], + "members":{ + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

The status of the asset model, which contains a state (DELETING after successfully calling this operation) and any error message.

" + } + } + }, + "DeleteAssetRequest":{ + "type":"structure", + "required":["assetId"], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset to delete.

", + "location":"uri", + "locationName":"assetId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteAssetResponse":{ + "type":"structure", + "required":["assetStatus"], + "members":{ + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

The status of the asset, which contains a state (DELETING after successfully calling this operation) and any error message.

" + } + } + }, + "DeleteDashboardRequest":{ + "type":"structure", + "required":["dashboardId"], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

The ID of the dashboard to delete.

", + "location":"uri", + "locationName":"dashboardId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteDashboardResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteGatewayRequest":{ + "type":"structure", + "required":["gatewayId"], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway to delete.

", + "location":"uri", + "locationName":"gatewayId" + } + } + }, + "DeletePortalRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal to delete.

", + "location":"uri", + "locationName":"portalId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeletePortalResponse":{ + "type":"structure", + "required":["portalStatus"], + "members":{ + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

The status of the portal, which contains a state (DELETING after successfully calling this operation) and any error message.

" + } + } + }, + "DeleteProjectRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

", + "location":"uri", + "locationName":"projectId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + } + } + }, + "DeleteProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeAccessPolicyRequest":{ + "type":"structure", + "required":["accessPolicyId"], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

The ID of the access policy.

", + "location":"uri", + "locationName":"accessPolicyId" + } + } + }, + "DescribeAccessPolicyResponse":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyArn", + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission", + "accessPolicyCreationDate", + "accessPolicyLastUpdateDate" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

The ID of the access policy.

" + }, + "accessPolicyArn":{ + "shape":"ARN", + "documentation":"

The ARN of the access policy, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:access-policy/${AccessPolicyId}

" + }, + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

The AWS SSO identity (user or group) to which this access policy applies.

" + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

The AWS IoT SiteWise Monitor resource (portal or project) to which this access policy provides access.

" + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

The access policy permission. Note that a project ADMINISTRATOR is also known as a project owner.

" + }, + "accessPolicyCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the access policy was created, in Unix epoch time.

" + }, + "accessPolicyLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the access policy was last updated, in Unix epoch time.

" + } + } + }, + "DescribeAssetModelRequest":{ + "type":"structure", + "required":["assetModelId"], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model.

", + "location":"uri", + "locationName":"assetModelId" + } + } + }, + "DescribeAssetModelResponse":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelArn", + "assetModelName", + "assetModelDescription", + "assetModelProperties", + "assetModelHierarchies", + "assetModelCreationDate", + "assetModelLastUpdateDate", + "assetModelStatus" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model.

" + }, + "assetModelArn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset model, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}

" + }, + "assetModelName":{ + "shape":"Name", + "documentation":"

The name of the asset model.

" + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

The asset model's description.

" + }, + "assetModelProperties":{ + "shape":"AssetModelProperties", + "documentation":"

The list of asset properties for the asset model.

" + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchies", + "documentation":"

A list of asset model hierarchies that each contain a childAssetModelId and a hierarchyId (named id). A hierarchy specifies allowed parent/child asset relationships for an asset model.

" + }, + "assetModelCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset model was created, in Unix epoch time.

" + }, + "assetModelLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset model was last updated, in Unix epoch time.

" + }, + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

The current status of the asset model, which contains a state and any error message.

" + } + } + }, + "DescribeAssetPropertyRequest":{ + "type":"structure", + "required":[ + "assetId", + "propertyId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

", + "location":"uri", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"uri", + "locationName":"propertyId" + } + } + }, + "DescribeAssetPropertyResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetName", + "assetModelId", + "assetProperty" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

" + }, + "assetName":{ + "shape":"Name", + "documentation":"

The name of the asset.

" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model.

" + }, + "assetProperty":{ + "shape":"Property", + "documentation":"

The asset property's definition, alias, and notification state.

" + } + } + }, + "DescribeAssetRequest":{ + "type":"structure", + "required":["assetId"], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

", + "location":"uri", + "locationName":"assetId" + } + } + }, + "DescribeAssetResponse":{ + "type":"structure", + "required":[ + "assetId", + "assetArn", + "assetName", + "assetModelId", + "assetProperties", + "assetHierarchies", + "assetCreationDate", + "assetLastUpdateDate", + "assetStatus" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

" + }, + "assetArn":{ + "shape":"ARN", + "documentation":"

The ARN of the asset, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:asset/${AssetId}

" + }, + "assetName":{ + "shape":"Name", + "documentation":"

The name of the asset.

" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model that was used to create the asset.

" + }, + "assetProperties":{ + "shape":"AssetProperties", + "documentation":"

The list of asset properties for the asset.

" + }, + "assetHierarchies":{ + "shape":"AssetHierarchies", + "documentation":"

A list of asset hierarchies that each contain a hierarchyId. A hierarchy specifies allowed parent/child asset relationships.

" + }, + "assetCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was created, in Unix epoch time.

" + }, + "assetLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the asset was last updated, in Unix epoch time.

" + }, + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

The current status of the asset, which contains a state and any error message.

" + } + } + }, + "DescribeDashboardRequest":{ + "type":"structure", + "required":["dashboardId"], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

The ID of the dashboard.

", + "location":"uri", + "locationName":"dashboardId" + } + } + }, + "DescribeDashboardResponse":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardArn", + "dashboardName", + "projectId", + "dashboardDefinition", + "dashboardCreationDate", + "dashboardLastUpdateDate" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

The ID of the dashboard.

" + }, + "dashboardArn":{ + "shape":"ARN", + "documentation":"

The ARN of the dashboard, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:dashboard/${DashboardId}

" + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

The name of the dashboard.

" + }, + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project that the dashboard is in.

" + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

The dashboard's description.

" + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

The dashboard's definition JSON literal. For detailed information, see Creating Dashboards (CLI) in the AWS IoT SiteWise User Guide.

" + }, + "dashboardCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the dashboard was created, in Unix epoch time.

" + }, + "dashboardLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the dashboard was last updated, in Unix epoch time.

" + } + } + }, + "DescribeGatewayCapabilityConfigurationRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway that defines the capability configuration.

", + "location":"uri", + "locationName":"gatewayId" + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

", + "location":"uri", + "locationName":"capabilityNamespace" + } + } + }, + "DescribeGatewayCapabilityConfigurationResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace", + "capabilityConfiguration", + "capabilitySyncStatus" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway that defines the capability configuration.

" + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

The namespace of the gateway capability.

" + }, + "capabilityConfiguration":{ + "shape":"CapabilityConfiguration", + "documentation":"

The JSON document that defines the gateway capability's configuration. For more information, see Configuring data sources (CLI) in the AWS IoT SiteWise User Guide.

" + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

The synchronization status of the capability configuration. The sync status can be one of the following:

  • IN_SYNC – The gateway is running the capability configuration.

  • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

  • SYNC_FAILED – The gateway rejected the capability configuration.

" + } + } + }, + "DescribeGatewayRequest":{ + "type":"structure", + "required":["gatewayId"], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway device.

", + "location":"uri", + "locationName":"gatewayId" + } + } + }, + "DescribeGatewayResponse":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName", + "gatewayArn", + "gatewayCapabilitySummaries", + "creationDate", + "lastUpdateDate" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway device.

" + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

The name of the gateway.

" + }, + "gatewayArn":{ + "shape":"ARN", + "documentation":"

The ARN of the gateway, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:gateway/${GatewayId}

" + }, + "gatewayPlatform":{ + "shape":"GatewayPlatform", + "documentation":"

The gateway's platform.

" + }, + "gatewayCapabilitySummaries":{ + "shape":"GatewayCapabilitySummaries", + "documentation":"

A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use DescribeGatewayCapabilityConfiguration.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the gateway was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the gateway was last updated, in Unix epoch time.

" + } + } + }, + "DescribeLoggingOptionsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeLoggingOptionsResponse":{ + "type":"structure", + "required":["loggingOptions"], + "members":{ + "loggingOptions":{ + "shape":"LoggingOptions", + "documentation":"

The current logging options.

" + } + } + }, + "DescribePortalRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal.

", + "location":"uri", + "locationName":"portalId" + } + } + }, + "DescribePortalResponse":{ + "type":"structure", + "required":[ + "portalId", + "portalArn", + "portalName", + "portalClientId", + "portalStartUrl", + "portalContactEmail", + "portalStatus", + "portalCreationDate", + "portalLastUpdateDate" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal.

" + }, + "portalArn":{ + "shape":"ARN", + "documentation":"

The ARN of the portal, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:portal/${PortalId}

" + }, + "portalName":{ + "shape":"Name", + "documentation":"

The name of the portal.

" + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

The portal's description.

" + }, + "portalClientId":{ + "shape":"PortalClientId", + "documentation":"

The AWS SSO application generated client ID (used with AWS SSO APIs).

" + }, + "portalStartUrl":{ + "shape":"Url", + "documentation":"

The public root URL for the AWS IoT SiteWise Monitor application portal.

" + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

The AWS administrator's contact email address.

" + }, + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

The current status of the portal, which contains a state and any error message.

" + }, + "portalCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the portal was created, in Unix epoch time.

" + }, + "portalLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the portal was last updated, in Unix epoch time.

" + }, + "portalLogoImageLocation":{ + "shape":"ImageLocation", + "documentation":"

The portal's logo image, which is available at a URL.

" + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

" + } + } + }, + "DescribeProjectRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

", + "location":"uri", + "locationName":"projectId" + } + } + }, + "DescribeProjectResponse":{ + "type":"structure", + "required":[ + "projectId", + "projectArn", + "projectName", + "portalId", + "projectCreationDate", + "projectLastUpdateDate" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

" + }, + "projectArn":{ + "shape":"ARN", + "documentation":"

The ARN of the project, which has the following format.

arn:${Partition}:iotsitewise:${Region}:${Account}:project/${ProjectId}

" + }, + "projectName":{ + "shape":"Name", + "documentation":"

The name of the project.

" + }, + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal that the project is in.

" + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

The project's description.

" + }, + "projectCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date the project was created, in Unix epoch time.

" + }, + "projectLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the project was last updated, in Unix epoch time.

" + } + } + }, + "Description":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "DisassociateAssetsRequest":{ + "type":"structure", + "required":[ + "assetId", + "hierarchyId", + "childAssetId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the parent asset from which to disassociate the child asset.

", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

The ID of a hierarchy in the parent asset's model. Hierarchies allow different groupings of assets to be formed that all come from the same asset model. You can use the hierarchy ID to identify the correct asset to disassociate. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

" + }, + "childAssetId":{ + "shape":"ID", + "documentation":"

The ID of the child asset to disassociate.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "Email":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[^@]+@[^@]+" + }, + "EntryId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9_-]+$" + }, + "ErrorCode":{ + "type":"string", + "enum":[ + "VALIDATION_ERROR", + "INTERNAL_FAILURE" + ] + }, + "ErrorDetails":{ + "type":"structure", + "required":[ + "code", + "message" + ], + "members":{ + "code":{ + "shape":"ErrorCode", + "documentation":"

The error code.

" + }, + "message":{ + "shape":"ErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

Contains the details of an AWS IoT SiteWise error.

" + }, + "ErrorMessage":{"type":"string"}, + "ExceptionMessage":{"type":"string"}, + "Expression":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[a-z0-9._+\\-*%/^, ()]+$" + }, + "ExpressionVariable":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"VariableName", + "documentation":"

The friendly name of the variable to be used in the expression.

" + }, + "value":{ + "shape":"VariableValue", + "documentation":"

The variable that identifies an asset property from which to use values.

" + } + }, + "documentation":"

Contains expression variable information.

" + }, + "ExpressionVariables":{ + "type":"list", + "member":{"shape":"ExpressionVariable"} + }, + "GatewayCapabilitySummaries":{ + "type":"list", + "member":{"shape":"GatewayCapabilitySummary"} + }, + "GatewayCapabilitySummary":{ + "type":"structure", + "required":[ + "capabilityNamespace", + "capabilitySyncStatus" + ], + "members":{ + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

" + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

The synchronization status of the capability configuration. The sync status can be one of the following:

  • IN_SYNC – The gateway is running the capability configuration.

  • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

  • SYNC_FAILED – The gateway rejected the capability configuration.

" + } + }, + "documentation":"

Contains a summary of a gateway capability configuration.

" + }, + "GatewayPlatform":{ + "type":"structure", + "required":["greengrass"], + "members":{ + "greengrass":{ + "shape":"Greengrass", + "documentation":"

A gateway that runs on AWS IoT Greengrass.

" + } + }, + "documentation":"

Contains a gateway's platform information.

" + }, + "GatewaySummaries":{ + "type":"list", + "member":{"shape":"GatewaySummary"} + }, + "GatewaySummary":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName", + "creationDate", + "lastUpdateDate" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway device.

" + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

The name of the gateway.

" + }, + "gatewayCapabilitySummaries":{ + "shape":"GatewayCapabilitySummaries", + "documentation":"

A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use DescribeGatewayCapabilityConfiguration.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the gateway was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the gateway was last updated, in Unix epoch time.

" + } + }, + "documentation":"

Contains a summary of a gateway.

" + }, + "GetAssetPropertyAggregatesRequest":{ + "type":"structure", + "required":[ + "aggregateTypes", + "resolution", + "startDate", + "endDate" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

", + "location":"querystring", + "locationName":"propertyAlias" + }, + "aggregateTypes":{ + "shape":"AggregateTypes", + "documentation":"

The data aggregating function.

", + "location":"querystring", + "locationName":"aggregateTypes" + }, + "resolution":{ + "shape":"Resolution", + "documentation":"

The time interval over which to aggregate data.

", + "location":"querystring", + "locationName":"resolution" + }, + "qualities":{ + "shape":"Qualities", + "documentation":"

The quality by which to filter asset data.

", + "location":"querystring", + "locationName":"qualities" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

The exclusive start of the range from which to query historical data, expressed in seconds in Unix epoch time.

", + "location":"querystring", + "locationName":"startDate" + }, + "endDate":{ + "shape":"Timestamp", + "documentation":"

The inclusive end of the range from which to query historical data, expressed in seconds in Unix epoch time.

", + "location":"querystring", + "locationName":"endDate" + }, + "timeOrdering":{ + "shape":"TimeOrdering", + "documentation":"

The chronological sorting order of the requested information.

", + "location":"querystring", + "locationName":"timeOrdering" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetAssetPropertyAggregatesResponse":{ + "type":"structure", + "required":["aggregatedValues"], + "members":{ + "aggregatedValues":{ + "shape":"AggregatedValues", + "documentation":"

The requested aggregated values.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "GetAssetPropertyValueHistoryRequest":{ + "type":"structure", + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

", + "location":"querystring", + "locationName":"propertyAlias" + }, + "startDate":{ + "shape":"Timestamp", + "documentation":"

The exclusive start of the range from which to query historical data, expressed in seconds in Unix epoch time.

", + "location":"querystring", + "locationName":"startDate" + }, + "endDate":{ + "shape":"Timestamp", + "documentation":"

The inclusive end of the range from which to query historical data, expressed in seconds in Unix epoch time.

", + "location":"querystring", + "locationName":"endDate" + }, + "qualities":{ + "shape":"Qualities", + "documentation":"

The quality by which to filter asset data.

", + "location":"querystring", + "locationName":"qualities" + }, + "timeOrdering":{ + "shape":"TimeOrdering", + "documentation":"

The chronological sorting order of the requested information.

", + "location":"querystring", + "locationName":"timeOrdering" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "GetAssetPropertyValueHistoryResponse":{ + "type":"structure", + "required":["assetPropertyValueHistory"], + "members":{ + "assetPropertyValueHistory":{ + "shape":"AssetPropertyValueHistory", + "documentation":"

The asset property's value history.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "GetAssetPropertyValueRequest":{ + "type":"structure", + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

", + "location":"querystring", + "locationName":"propertyAlias" + } + } + }, + "GetAssetPropertyValueResponse":{ + "type":"structure", + "members":{ + "propertyValue":{ + "shape":"AssetPropertyValue", + "documentation":"

The current asset property value.

" + } + } + }, + "Greengrass":{ + "type":"structure", + "required":["groupArn"], + "members":{ + "groupArn":{ + "shape":"ARN", + "documentation":"

The ARN of the Greengrass group. For more information about how to find a group's ARN, see ListGroups and GetGroup in the AWS IoT Greengrass API Reference.

" + } + }, + "documentation":"

Contains details for a gateway that runs on AWS IoT Greengrass. To create a gateway that runs on AWS IoT Greengrass, you must add the IoT SiteWise connector to a Greengrass group and deploy it. Your Greengrass group must also have permissions to upload data to AWS IoT SiteWise. For more information, see Ingesting data using a gateway in the AWS IoT SiteWise User Guide.

" + }, + "GroupIdentity":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"IdentityId", + "documentation":"

The AWS SSO ID of the group.

" + } + }, + "documentation":"

Contains information for a group identity in an access policy.

" + }, + "ID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, + "IDs":{ + "type":"list", + "member":{"shape":"ID"}, + "max":100, + "min":1 + }, + "Identity":{ + "type":"structure", + "members":{ + "user":{ + "shape":"UserIdentity", + "documentation":"

A user identity.

" + }, + "group":{ + "shape":"GroupIdentity", + "documentation":"

A group identity.

" + } + }, + "documentation":"

Contains an AWS SSO identity ID for a user or group.

Currently, you can't use AWS APIs to retrieve AWS SSO identity IDs. You can find the AWS SSO identity IDs in the URL of user and group pages in the AWS SSO console.

" + }, + "IdentityId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\S+" + }, + "IdentityType":{ + "type":"string", + "enum":[ + "USER", + "GROUP" + ] + }, + "Image":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of an existing image. Specify this parameter to keep an existing image.

" + }, + "file":{"shape":"ImageFile"} + }, + "documentation":"

Contains an image that is one of the following:

  • An image file. Choose this option to upload a new image.

  • The ID of an existing image. Choose this option to keep an existing image.

" + }, + "ImageFile":{ + "type":"structure", + "required":[ + "data", + "type" + ], + "members":{ + "data":{ + "shape":"ImageFileData", + "documentation":"

The image file contents, represented as a base64-encoded string. The file size must be less than 1 MB.

" + }, + "type":{ + "shape":"ImageFileType", + "documentation":"

The file type of the image.

" + } + }, + "documentation":"

Contains an image file.

" + }, + "ImageFileData":{ + "type":"blob", + "max":1500000, + "min":1 + }, + "ImageFileType":{ + "type":"string", + "enum":["PNG"] + }, + "ImageLocation":{ + "type":"structure", + "required":[ + "id", + "url" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the image.

" + }, + "url":{ + "shape":"Url", + "documentation":"

The URL where the image is available. The URL is valid for 15 minutes so that you can view and download the image.

" + } + }, + "documentation":"

Contains an image that is uploaded to AWS IoT SiteWise and available at a URL.

" + }, + "InternalFailureException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

AWS IoT SiteWise can't process your request right now. Try again later.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "Interval":{ + "type":"string", + "max":3, + "min":2, + "pattern":"1w|1d|1h|15m|5m|1m" + }, + "InvalidRequestException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request isn't valid. This can occur if your request contains malformed JSON or unsupported characters. Check your request and try again.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You've reached the limit for a resource. For example, this can occur if you're trying to associate more than the allowed number of child assets or attempting to create more than the allowed number of properties for an asset model.

For more information, see Quotas in the AWS IoT SiteWise User Guide.

", + "error":{"httpStatusCode":410}, + "exception":true + }, + "ListAccessPoliciesRequest":{ + "type":"structure", + "members":{ + "identityType":{ + "shape":"IdentityType", + "documentation":"

The type of identity (user or group). This parameter is required if you specify identityId.

", + "location":"querystring", + "locationName":"identityType" + }, + "identityId":{ + "shape":"IdentityId", + "documentation":"

The ID of the identity. This parameter is required if you specify identityType.

", + "location":"querystring", + "locationName":"identityId" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource (portal or project). This parameter is required if you specify resourceId.

", + "location":"querystring", + "locationName":"resourceType" + }, + "resourceId":{ + "shape":"ID", + "documentation":"

The ID of the resource. This parameter is required if you specify resourceType.

", + "location":"querystring", + "locationName":"resourceId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAccessPoliciesResponse":{ + "type":"structure", + "required":["accessPolicySummaries"], + "members":{ + "accessPolicySummaries":{ + "shape":"AccessPolicySummaries", + "documentation":"

A list that summarizes each access policy.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListAssetModelsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssetModelsResponse":{ + "type":"structure", + "required":["assetModelSummaries"], + "members":{ + "assetModelSummaries":{ + "shape":"AssetModelSummaries", + "documentation":"

A list that summarizes each asset model.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListAssetsFilter":{ + "type":"string", + "enum":[ + "ALL", + "TOP_LEVEL" + ] + }, + "ListAssetsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model by which to filter the list of assets. This parameter is required if you choose ALL for filter.

", + "location":"querystring", + "locationName":"assetModelId" + }, + "filter":{ + "shape":"ListAssetsFilter", + "documentation":"

The filter for the requested list of assets. Choose one of the following options. Defaults to ALL.

  • ALL – The list includes all assets for a given asset model ID. The assetModelId parameter is required if you filter by ALL.

  • TOP_LEVEL – The list includes only top-level assets in the asset hierarchy tree.

", + "location":"querystring", + "locationName":"filter" + } + } + }, + "ListAssetsResponse":{ + "type":"structure", + "required":["assetSummaries"], + "members":{ + "assetSummaries":{ + "shape":"AssetSummaries", + "documentation":"

A list that summarizes each asset.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListAssociatedAssetsRequest":{ + "type":"structure", + "required":[ + "assetId", + "hierarchyId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the parent asset.

", + "location":"uri", + "locationName":"assetId" + }, + "hierarchyId":{ + "shape":"ID", + "documentation":"

The hierarchy ID (of the parent asset model) whose associated assets are returned. To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel actions.

For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

", + "location":"querystring", + "locationName":"hierarchyId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAssociatedAssetsResponse":{ + "type":"structure", + "required":["assetSummaries"], + "members":{ + "assetSummaries":{ + "shape":"AssociatedAssetsSummaries", + "documentation":"

A list that summarizes the associated assets.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListDashboardsRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

", + "location":"querystring", + "locationName":"projectId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDashboardsResponse":{ + "type":"structure", + "required":["dashboardSummaries"], + "members":{ + "dashboardSummaries":{ + "shape":"DashboardSummaries", + "documentation":"

A list that summarizes each dashboard in the project.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListGatewaysRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListGatewaysResponse":{ + "type":"structure", + "required":["gatewaySummaries"], + "members":{ + "gatewaySummaries":{ + "shape":"GatewaySummaries", + "documentation":"

A list that summarizes each gateway.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListPortalsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPortalsResponse":{ + "type":"structure", + "members":{ + "portalSummaries":{ + "shape":"PortalSummaries", + "documentation":"

A list that summarizes each portal.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListProjectAssetsRequest":{ + "type":"structure", + "required":["projectId"], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project.

", + "location":"uri", + "locationName":"projectId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectAssetsResponse":{ + "type":"structure", + "required":["assetIds"], + "members":{ + "assetIds":{ + "shape":"AssetIDs", + "documentation":"

A list that contains the IDs of each asset associated with the project.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListProjectsRequest":{ + "type":"structure", + "required":["portalId"], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal.

", + "location":"querystring", + "locationName":"portalId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per paginated request.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProjectsResponse":{ + "type":"structure", + "required":["projectSummaries"], + "members":{ + "projectSummaries":{ + "shape":"ProjectSummaries", + "documentation":"

A list that summarizes each project in the portal.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the resource.

", + "location":"querystring", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The list of key-value pairs that contain metadata for the resource. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "LoggingLevel":{ + "type":"string", + "enum":[ + "ERROR", + "INFO", + "OFF" + ] + }, + "LoggingOptions":{ + "type":"structure", + "required":["level"], + "members":{ + "level":{ + "shape":"LoggingLevel", + "documentation":"

The AWS IoT SiteWise logging verbosity level.

" + } + }, + "documentation":"

Contains logging options.

" + }, + "Macro":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "MaxResults":{ + "type":"integer", + "max":250, + "min":1 + }, + "Measurement":{ + "type":"structure", + "members":{ + }, + "documentation":"

Contains an asset measurement property. This structure is empty. For more information, see Measurements in the AWS IoT SiteWise User Guide.

" + }, + "Metric":{ + "type":"structure", + "required":[ + "expression", + "variables", + "window" + ], + "members":{ + "expression":{ + "shape":"Expression", + "documentation":"

The mathematical expression that defines the metric aggregation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.

For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "variables":{ + "shape":"ExpressionVariables", + "documentation":"

The list of variables used in the expression.

" + }, + "window":{ + "shape":"MetricWindow", + "documentation":"

The window (time interval) over which AWS IoT SiteWise computes the metric's aggregation expression. AWS IoT SiteWise computes one data point per window.

" + } + }, + "documentation":"

Contains an asset metric property. With metrics, you can calculate aggregate functions, such as an average, maximum, or minimum, as specified through an expression. A metric maps several values to a single value (such as a sum).

The maximum number of dependent/cascading variables used in any one metric calculation is 10. Therefore, a root metric can have up to 10 cascading metrics in its computational dependency tree. Additionally, a metric can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

For more information, see Metrics in the AWS IoT SiteWise User Guide.

" + }, + "MetricWindow":{ + "type":"structure", + "members":{ + "tumbling":{ + "shape":"TumblingWindow", + "documentation":"

The tumbling time interval window.

" + } + }, + "documentation":"

Contains a time interval window used for data aggregate computations (for example, average, sum, count, and so on).

" + }, + "MonitorErrorCode":{ + "type":"string", + "enum":["INTERNAL_FAILURE"] + }, + "MonitorErrorDetails":{ + "type":"structure", + "members":{ + "code":{ + "shape":"MonitorErrorCode", + "documentation":"

The error code.

" + }, + "message":{ + "shape":"MonitorErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

Contains AWS IoT SiteWise Monitor error details.

" + }, + "MonitorErrorMessage":{"type":"string"}, + "Name":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[A-Za-z0-9+/=]+" + }, + "OffsetInNanos":{ + "type":"integer", + "max":999999999, + "min":0 + }, + "Permission":{ + "type":"string", + "enum":[ + "ADMINISTRATOR", + "VIEWER" + ] + }, + "PortalClientId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[!-~]*" + }, + "PortalResource":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the portal.

" + } + }, + "documentation":"

Identifies an AWS IoT SiteWise Monitor portal.

" + }, + "PortalState":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, + "PortalStatus":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{ + "shape":"PortalState", + "documentation":"

The current state of the portal.

" + }, + "error":{ + "shape":"MonitorErrorDetails", + "documentation":"

Contains associated error information, if any.

" + } + }, + "documentation":"

Contains information about the current status of a portal.

" + }, + "PortalSummaries":{ + "type":"list", + "member":{"shape":"PortalSummary"} + }, + "PortalSummary":{ + "type":"structure", + "required":[ + "id", + "name", + "startUrl" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the portal.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the portal.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The portal's description.

" + }, + "startUrl":{ + "shape":"Url", + "documentation":"

The public root URL for the AWS IoT SiteWise Monitor application portal.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the portal was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the portal was last updated, in Unix epoch time.

" + }, + "roleArn":{ + "shape":"ARN", + "documentation":"

The ARN of the service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

" + } + }, + "documentation":"

Contains a portal summary.

" + }, + "ProjectResource":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the project.

" + } + }, + "documentation":"

Identifies a specific AWS IoT SiteWise Monitor project.

" + }, + "ProjectSummaries":{ + "type":"list", + "member":{"shape":"ProjectSummary"} + }, + "ProjectSummary":{ + "type":"structure", + "required":[ + "id", + "name" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the project.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the project.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The project's description.

" + }, + "creationDate":{ + "shape":"Timestamp", + "documentation":"

The date the project was created, in Unix epoch time.

" + }, + "lastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date the project was last updated, in Unix epoch time.

" + } + }, + "documentation":"

Contains project summary information.

" + }, + "Property":{ + "type":"structure", + "required":[ + "id", + "name", + "dataType" + ], + "members":{ + "id":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the property.

" + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

" + }, + "notification":{ + "shape":"PropertyNotification", + "documentation":"

The asset property's notification topic and state. For more information, see UpdateAssetProperty.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The property data type.

" + }, + "unit":{ + "shape":"PropertyUnit", + "documentation":"

The unit (such as Newtons or RPM) of the asset property.

" + }, + "type":{ + "shape":"PropertyType", + "documentation":"

The property type (see PropertyType). A property contains one type.

" + } + }, + "documentation":"

Contains asset property information.

" + }, + "PropertyAlias":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PropertyDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DOUBLE", + "BOOLEAN" + ] + }, + "PropertyNotification":{ + "type":"structure", + "required":[ + "topic", + "state" + ], + "members":{ + "topic":{ + "shape":"PropertyNotificationTopic", + "documentation":"

The MQTT topic to which AWS IoT SiteWise publishes property value update notifications.

" + }, + "state":{ + "shape":"PropertyNotificationState", + "documentation":"

The current notification state.

" + } + }, + "documentation":"

Contains asset property value notification information. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see Interacting with Other Services in the AWS IoT SiteWise User Guide.

" + }, + "PropertyNotificationState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "PropertyNotificationTopic":{"type":"string"}, + "PropertyType":{ + "type":"structure", + "members":{ + "attribute":{ + "shape":"Attribute", + "documentation":"

Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an IIoT wind turbine.

" + }, + "measurement":{ + "shape":"Measurement", + "documentation":"

Specifies an asset measurement property. A measurement represents a device's raw sensor data stream, such as timestamped temperature values or timestamped power values.

" + }, + "transform":{ + "shape":"Transform", + "documentation":"

Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.

" + }, + "metric":{ + "shape":"Metric", + "documentation":"

Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.

" + } + }, + "documentation":"

Contains a property type, which can be one of attribute, measurement, metric, or transform.

" + }, + "PropertyUnit":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PropertyValueBooleanValue":{"type":"boolean"}, + "PropertyValueDoubleValue":{"type":"double"}, + "PropertyValueIntegerValue":{"type":"integer"}, + "PropertyValueStringValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[^\\u0000-\\u001F\\u007F]+" + }, + "PutAssetPropertyValueEntries":{ + "type":"list", + "member":{"shape":"PutAssetPropertyValueEntry"} + }, + "PutAssetPropertyValueEntry":{ + "type":"structure", + "required":[ + "entryId", + "propertyValues" + ], + "members":{ + "entryId":{ + "shape":"EntryId", + "documentation":"

The user-specified ID for the entry. You can use this ID to identify which entries failed.

" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset to update.

" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property for this entry.

" + }, + "propertyAlias":{ + "shape":"AssetPropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

" + }, + "propertyValues":{ + "shape":"AssetPropertyValues", + "documentation":"

The list of property values to upload. You can specify up to 10 propertyValues array elements.

" + } + }, + "documentation":"

Contains a list of value updates for an asset property in the list of asset entries consumed by the BatchPutAssetPropertyValue API.

" + }, + "PutLoggingOptionsRequest":{ + "type":"structure", + "required":["loggingOptions"], + "members":{ + "loggingOptions":{ + "shape":"LoggingOptions", + "documentation":"

The logging options to set.

" + } + } + }, + "PutLoggingOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "Qualities":{ + "type":"list", + "member":{"shape":"Quality"}, + "max":1, + "min":1 + }, + "Quality":{ + "type":"string", + "enum":[ + "GOOD", + "BAD", + "UNCERTAIN" + ] + }, + "Resolution":{ + "type":"string", + "max":2, + "min":2, + "pattern":"1m|1h|1d" + }, + "Resource":{ + "type":"structure", + "members":{ + "portal":{ + "shape":"PortalResource", + "documentation":"

A portal resource.

" + }, + "project":{ + "shape":"ProjectResource", + "documentation":"

A project resource.

" + } + }, + "documentation":"

Contains an AWS IoT SiteWise Monitor resource ID for a portal or project.

" + }, + "ResourceAlreadyExistsException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceArn" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the resource that already exists.

" + }, + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource that already exists.

" + } + }, + "documentation":"

The resource already exists.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceArn":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The requested resource can't be found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "PORTAL", + "PROJECT" + ] + }, + "SSOApplicationId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[!-~]*" + }, + "ServiceUnavailableException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The requested service is unavailable.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the resource to tag.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs that contain metadata for the resource. For more information, see Tagging your AWS IoT SiteWise resources in the AWS IoT SiteWise User Guide.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

Your request exceeded a rate limit. For example, you might have exceeded the number of AWS IoT SiteWise assets that can be created per second, the allowed number of messages per second, and so on.

For more information, see Quotas in the AWS IoT SiteWise User Guide.

", + "error":{"httpStatusCode":429}, + "exception":true + }, + "TimeInNanos":{ + "type":"structure", + "required":["timeInSeconds"], + "members":{ + "timeInSeconds":{ + "shape":"TimeInSeconds", + "documentation":"

The timestamp date, in seconds, in the Unix epoch format. Fractional nanosecond data is provided by offsetInNanos.

" + }, + "offsetInNanos":{ + "shape":"OffsetInNanos", + "documentation":"

The nanosecond offset from timeInSeconds.

" + } + }, + "documentation":"

Contains a timestamp with optional nanosecond granularity.

" + }, + "TimeInSeconds":{ + "type":"long", + "max":31556889864403199, + "min":1 + }, + "TimeOrdering":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "Timestamp":{"type":"timestamp"}, + "Timestamps":{ + "type":"list", + "member":{"shape":"TimeInNanos"} + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"}, + "resourceName":{ + "shape":"AmazonResourceName", + "documentation":"

The name of the resource with too many tags.

" + } + }, + "documentation":"

You've reached the limit for the number of tags allowed for a resource. For more information, see Tag naming limits and requirements in the AWS General Reference.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "Transform":{ + "type":"structure", + "required":[ + "expression", + "variables" + ], + "members":{ + "expression":{ + "shape":"Expression", + "documentation":"

The mathematical expression that defines the transformation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.

For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "variables":{ + "shape":"ExpressionVariables", + "documentation":"

The list of variables used in the expression.

" + } + }, + "documentation":"

Contains an asset transform property. A transform is a one-to-one mapping of a property's data points from one form to another. For example, you can use a transform to convert a Celsius data stream to Fahrenheit by applying the transformation expression to each data point of the Celsius stream. A transform can only have a data type of DOUBLE and consume properties with data types of INTEGER or DOUBLE.

For more information, see Transforms in the AWS IoT SiteWise User Guide.

" + }, + "TumblingWindow":{ + "type":"structure", + "required":["interval"], + "members":{ + "interval":{ + "shape":"Interval", + "documentation":"

The time interval for the tumbling window. Note that w represents weeks, d represents days, h represents hours, and m represents minutes. AWS IoT SiteWise computes the 1w interval at the end of Sunday at midnight each week (UTC), the 1d interval at the end of each day at midnight (UTC), the 1h interval at the end of each hour, and so on.

When AWS IoT SiteWise aggregates data points for metric computations, the start of each interval is exclusive and the end of each interval is inclusive. AWS IoT SiteWise places the computed data point at the end of the interval.

" + } + }, + "documentation":"

Contains a tumbling window, which is a repeating fixed-sized, non-overlapping, and contiguous time interval. This window is used in metric and aggregation computations.

" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the resource to untag.

", + "location":"querystring", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of keys for tags to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccessPolicyRequest":{ + "type":"structure", + "required":[ + "accessPolicyId", + "accessPolicyIdentity", + "accessPolicyResource", + "accessPolicyPermission" + ], + "members":{ + "accessPolicyId":{ + "shape":"ID", + "documentation":"

The ID of the access policy.

", + "location":"uri", + "locationName":"accessPolicyId" + }, + "accessPolicyIdentity":{ + "shape":"Identity", + "documentation":"

The identity for this access policy. Choose either a user or a group but not both.

" + }, + "accessPolicyResource":{ + "shape":"Resource", + "documentation":"

The AWS IoT SiteWise Monitor resource for this access policy. Choose either portal or project but not both.

" + }, + "accessPolicyPermission":{ + "shape":"Permission", + "documentation":"

The permission level for this access policy. Note that a project ADMINISTRATOR is also known as a project owner.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateAccessPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAssetModelRequest":{ + "type":"structure", + "required":[ + "assetModelId", + "assetModelName" + ], + "members":{ + "assetModelId":{ + "shape":"ID", + "documentation":"

The ID of the asset model to update.

", + "location":"uri", + "locationName":"assetModelId" + }, + "assetModelName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the asset model.

" + }, + "assetModelDescription":{ + "shape":"Description", + "documentation":"

A description for the asset model.

" + }, + "assetModelProperties":{ + "shape":"AssetModelProperties", + "documentation":"

The updated property definitions of the asset model. For more information, see Asset Properties in the AWS IoT SiteWise User Guide.

You can specify up to 200 properties per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "assetModelHierarchies":{ + "shape":"AssetModelHierarchies", + "documentation":"

The updated hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

You can specify up to 10 hierarchies per asset model. For more information, see Quotas in the AWS IoT SiteWise User Guide.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateAssetModelResponse":{ + "type":"structure", + "required":["assetModelStatus"], + "members":{ + "assetModelStatus":{ + "shape":"AssetModelStatus", + "documentation":"

The status of the asset model, which contains a state (UPDATING after successfully calling this operation) and any error message.

" + } + } + }, + "UpdateAssetPropertyRequest":{ + "type":"structure", + "required":[ + "assetId", + "propertyId" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset to be updated.

", + "location":"uri", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property to be updated.

", + "location":"uri", + "locationName":"propertyId" + }, + "propertyAlias":{ + "shape":"PropertyAlias", + "documentation":"

The property alias that identifies the property, such as an OPC-UA server data stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information, see Mapping Industrial Data Streams to Asset Properties in the AWS IoT SiteWise User Guide.

If you omit this parameter, the alias is removed from the property.

" + }, + "propertyNotificationState":{ + "shape":"PropertyNotificationState", + "documentation":"

The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see Interacting with Other Services in the AWS IoT SiteWise User Guide.

If you omit this parameter, the notification state is set to DISABLED.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateAssetRequest":{ + "type":"structure", + "required":[ + "assetId", + "assetName" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset to update.

", + "location":"uri", + "locationName":"assetId" + }, + "assetName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the asset.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateAssetResponse":{ + "type":"structure", + "required":["assetStatus"], + "members":{ + "assetStatus":{ + "shape":"AssetStatus", + "documentation":"

The status of the asset, which contains a state (UPDATING after successfully calling this operation) and any error message.

" + } + } + }, + "UpdateDashboardRequest":{ + "type":"structure", + "required":[ + "dashboardId", + "dashboardName", + "dashboardDefinition" + ], + "members":{ + "dashboardId":{ + "shape":"ID", + "documentation":"

The ID of the dashboard to update.

", + "location":"uri", + "locationName":"dashboardId" + }, + "dashboardName":{ + "shape":"Name", + "documentation":"

A new friendly name for the dashboard.

" + }, + "dashboardDescription":{ + "shape":"Description", + "documentation":"

A new description for the dashboard.

" + }, + "dashboardDefinition":{ + "shape":"DashboardDefinition", + "documentation":"

The new dashboard definition, as specified in a JSON literal. For detailed information, see Creating Dashboards (CLI) in the AWS IoT SiteWise User Guide.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateDashboardResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateGatewayCapabilityConfigurationRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "capabilityNamespace", + "capabilityConfiguration" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway to be updated.

", + "location":"uri", + "locationName":"gatewayId" + }, + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

The namespace of the gateway capability configuration to be updated. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace iotsitewise:opcuacollector:version, where version is a number such as 1.

" + }, + "capabilityConfiguration":{ + "shape":"CapabilityConfiguration", + "documentation":"

The JSON document that defines the configuration for the gateway capability. For more information, see Configuring data sources (CLI) in the AWS IoT SiteWise User Guide.

" + } + } + }, + "UpdateGatewayCapabilityConfigurationResponse":{ + "type":"structure", + "required":[ + "capabilityNamespace", + "capabilitySyncStatus" + ], + "members":{ + "capabilityNamespace":{ + "shape":"CapabilityNamespace", + "documentation":"

The namespace of the gateway capability.

" + }, + "capabilitySyncStatus":{ + "shape":"CapabilitySyncStatus", + "documentation":"

The synchronization status of the capability configuration. The sync status can be one of the following:

  • IN_SYNC – The gateway is running the capability configuration.

  • OUT_OF_SYNC – The gateway hasn't received the capability configuration.

  • SYNC_FAILED – The gateway rejected the capability configuration.

After you update a capability configuration, its sync status is OUT_OF_SYNC until the gateway receives and applies or rejects the updated configuration.

" + } + } + }, + "UpdateGatewayRequest":{ + "type":"structure", + "required":[ + "gatewayId", + "gatewayName" + ], + "members":{ + "gatewayId":{ + "shape":"ID", + "documentation":"

The ID of the gateway to update.

", + "location":"uri", + "locationName":"gatewayId" + }, + "gatewayName":{ + "shape":"Name", + "documentation":"

A unique, friendly name for the gateway.

" + } + } + }, + "UpdatePortalRequest":{ + "type":"structure", + "required":[ + "portalId", + "portalName", + "portalContactEmail", + "roleArn" + ], + "members":{ + "portalId":{ + "shape":"ID", + "documentation":"

The ID of the portal to update.

", + "location":"uri", + "locationName":"portalId" + }, + "portalName":{ + "shape":"Name", + "documentation":"

A new friendly name for the portal.

" + }, + "portalDescription":{ + "shape":"Description", + "documentation":"

A new description for the portal.

" + }, + "portalContactEmail":{ + "shape":"Email", + "documentation":"

The AWS administrator's contact email address.

" + }, + "portalLogoImage":{"shape":"Image"}, + "roleArn":{ + "shape":"ARN", + "documentation":"

The ARN of a service role that allows the portal's users to access your AWS IoT SiteWise resources on your behalf. For more information, see Using service roles for AWS IoT SiteWise Monitor in the AWS IoT SiteWise User Guide.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdatePortalResponse":{ + "type":"structure", + "required":["portalStatus"], + "members":{ + "portalStatus":{ + "shape":"PortalStatus", + "documentation":"

The status of the portal, which contains a state (UPDATING after successfully calling this operation) and any error message.

" + } + } + }, + "UpdateProjectRequest":{ + "type":"structure", + "required":[ + "projectId", + "projectName" + ], + "members":{ + "projectId":{ + "shape":"ID", + "documentation":"

The ID of the project to update.

", + "location":"uri", + "locationName":"projectId" + }, + "projectName":{ + "shape":"Name", + "documentation":"

A new friendly name for the project.

" + }, + "projectDescription":{ + "shape":"Description", + "documentation":"

A new description for the project.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "UpdateProjectResponse":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^(http|https)\\://\\S+" + }, + "UserIdentity":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"IdentityId", + "documentation":"

The AWS SSO ID of the user.

" + } + }, + "documentation":"

Contains information for a user identity in an access policy.

" + }, + "VariableName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-z][a-z0-9_]*$" + }, + "VariableValue":{ + "type":"structure", + "required":["propertyId"], + "members":{ + "propertyId":{ + "shape":"Macro", + "documentation":"

The ID of the property to use as the variable. You can use the property name if it's from the same asset model.

" + }, + "hierarchyId":{ + "shape":"Macro", + "documentation":"

The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID.

You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId. For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide.

" + } + }, + "documentation":"

Identifies a property value used in an expression.

" + }, + "Variant":{ + "type":"structure", + "members":{ + "stringValue":{ + "shape":"PropertyValueStringValue", + "documentation":"

Asset property data of type string (sequence of characters).

" + }, + "integerValue":{ + "shape":"PropertyValueIntegerValue", + "documentation":"

Asset property data of type integer (whole number).

" + }, + "doubleValue":{ + "shape":"PropertyValueDoubleValue", + "documentation":"

Asset property data of type double (floating point number).

" + }, + "booleanValue":{ + "shape":"PropertyValueBooleanValue", + "documentation":"

Asset property data of type Boolean (true or false).

" + } + }, + "documentation":"

Contains an asset property value (of a single type only).

" + } + }, + "documentation":"

Welcome to the AWS IoT SiteWise API Reference. AWS IoT SiteWise is an AWS service that connects Industrial Internet of Things (IIoT) devices to the power of the AWS Cloud. For more information, see the AWS IoT SiteWise User Guide. For information about AWS IoT SiteWise quotas, see Quotas in the AWS IoT SiteWise User Guide.

" +} diff --git a/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json b/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..e51df5feaaa9 --- /dev/null +++ b/services/iotsitewise/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,92 @@ +{ + "version": 2, + "waiters": { + "AssetModelNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAssetModel", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "AssetModelActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAssetModel", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "assetModelStatus.state", + "expected": "ACTIVE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "assetModelStatus.state", + "expected": "FAILED" + } + ] + }, + "AssetNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAsset", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "AssetActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribeAsset", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "assetStatus.state", + "expected": "ACTIVE" + }, + { + "state": "failure", + "matcher": "path", + "argument": "assetStatus.state", + "expected": "FAILED" + } + ] + }, + "PortalNotExists": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribePortal", + "acceptors": [ + { + "state": "success", + "matcher": "error", + "expected": "ResourceNotFoundException" + } + ] + }, + "PortalActive": { + "delay": 3, + "maxAttempts": 20, + "operation": "DescribePortal", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "portalStatus.state", + "expected": "ACTIVE" + } + ] + } + } + } diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml 
index ea0f997a755a..6ce35c84d793 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml new file mode 100644 index 000000000000..fcab9cf416b3 --- /dev/null +++ b/services/ivs/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + ivs + AWS Java SDK :: Services :: Ivs + The AWS Java SDK for Ivs module holds the client classes that are used for + communicating with Ivs. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.ivs + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/ivs/src/main/resources/codegen-resources/paginators-1.json b/services/ivs/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..2d16f8965700 --- /dev/null +++ b/services/ivs/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,27 @@ +{ + "pagination": { + "ListChannels": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "channels" + }, + "ListStreamKeys": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "streamKeys" + }, + "ListStreams": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "streams" + }, + "ListTagsForResource": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + } + } +} diff --git a/services/ivs/src/main/resources/codegen-resources/service-2.json b/services/ivs/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 
000000000000..18aa30d0b178 --- /dev/null +++ b/services/ivs/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1082 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-07-14", + "endpointPrefix":"ivs", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon IVS", + "serviceFullName":"Amazon Interactive Video Service", + "serviceId":"ivs", + "signatureVersion":"v4", + "signingName":"ivs", + "uid":"ivs-2020-07-14" + }, + "operations":{ + "BatchGetChannel":{ + "name":"BatchGetChannel", + "http":{ + "method":"POST", + "requestUri":"/BatchGetChannel" + }, + "input":{"shape":"BatchGetChannelRequest"}, + "output":{"shape":"BatchGetChannelResponse"}, + "documentation":"

Performs GetChannel on multiple ARNs simultaneously.

" + }, + "BatchGetStreamKey":{ + "name":"BatchGetStreamKey", + "http":{ + "method":"POST", + "requestUri":"/BatchGetStreamKey" + }, + "input":{"shape":"BatchGetStreamKeyRequest"}, + "output":{"shape":"BatchGetStreamKeyResponse"}, + "documentation":"

Performs GetStreamKey on multiple ARNs simultaneously.

" + }, + "CreateChannel":{ + "name":"CreateChannel", + "http":{ + "method":"POST", + "requestUri":"/CreateChannel" + }, + "input":{"shape":"CreateChannelRequest"}, + "output":{"shape":"CreateChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a new channel and an associated stream key to start streaming.

" + }, + "CreateStreamKey":{ + "name":"CreateStreamKey", + "http":{ + "method":"POST", + "requestUri":"/CreateStreamKey" + }, + "input":{"shape":"CreateStreamKeyRequest"}, + "output":{"shape":"CreateStreamKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a stream key, used to initiate a stream, for a specified channel ARN.

Note that CreateChannel creates a stream key. If you subsequently use CreateStreamKey on the same channel, it will fail because a stream key already exists and there is a limit of 1 stream key per channel. To reset the stream key on a channel, use DeleteStreamKey and then CreateStreamKey.

" + }, + "DeleteChannel":{ + "name":"DeleteChannel", + "http":{ + "method":"POST", + "requestUri":"/DeleteChannel" + }, + "input":{"shape":"DeleteChannelRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a specified channel and its associated stream keys.

" + }, + "DeleteStreamKey":{ + "name":"DeleteStreamKey", + "http":{ + "method":"POST", + "requestUri":"/DeleteStreamKey" + }, + "input":{"shape":"DeleteStreamKeyRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the stream key for a specified ARN, so it can no longer be used to stream.

" + }, + "GetChannel":{ + "name":"GetChannel", + "http":{ + "method":"POST", + "requestUri":"/GetChannel" + }, + "input":{"shape":"GetChannelRequest"}, + "output":{"shape":"GetChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets the channel configuration for a specified channel ARN. See also BatchGetChannel.

" + }, + "GetStream":{ + "name":"GetStream", + "http":{ + "method":"POST", + "requestUri":"/GetStream" + }, + "input":{"shape":"GetStreamRequest"}, + "output":{"shape":"GetStreamResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ChannelNotBroadcasting"} + ], + "documentation":"

Gets information about the active (live) stream on a specified channel.

" + }, + "GetStreamKey":{ + "name":"GetStreamKey", + "http":{ + "method":"POST", + "requestUri":"/GetStreamKey" + }, + "input":{"shape":"GetStreamKeyRequest"}, + "output":{"shape":"GetStreamKeyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets stream-key information for a specified ARN.

" + }, + "ListChannels":{ + "name":"ListChannels", + "http":{ + "method":"POST", + "requestUri":"/ListChannels" + }, + "input":{"shape":"ListChannelsRequest"}, + "output":{"shape":"ListChannelsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets summary information about channels. This list can be filtered to match a specified string.

" + }, + "ListStreamKeys":{ + "name":"ListStreamKeys", + "http":{ + "method":"POST", + "requestUri":"/ListStreamKeys" + }, + "input":{"shape":"ListStreamKeysRequest"}, + "output":{"shape":"ListStreamKeysResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets summary information about stream keys. The list can be filtered to a particular channel.

" + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/ListStreams" + }, + "input":{"shape":"ListStreamsRequest"}, + "output":{"shape":"ListStreamsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"} + ], + "documentation":"

Gets summary information about live streams.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets information about the tags for a specified ARN.

" + }, + "PutMetadata":{ + "name":"PutMetadata", + "http":{ + "method":"POST", + "requestUri":"/PutMetadata" + }, + "input":{"shape":"PutMetadataRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ChannelNotBroadcasting"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Inserts metadata into an RTMP stream for a specified channel. A maximum of 5 requests per second per channel is allowed, each with a maximum 1KB payload.

" + }, + "StopStream":{ + "name":"StopStream", + "http":{ + "method":"POST", + "requestUri":"/StopStream" + }, + "input":{"shape":"StopStreamRequest"}, + "output":{"shape":"StopStreamResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ChannelNotBroadcasting"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"StreamUnavailable"} + ], + "documentation":"

Disconnects the stream for the specified channel. This disconnects the incoming RTMP stream from the client. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel.

Many streaming client-software libraries automatically reconnect a dropped RTMP session, so to stop the stream permanently, you may want to first revoke the streamKey attached to the channel.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds or updates tags for a resource with a specified ARN.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes tags for a resource with a specified ARN.

" + }, + "UpdateChannel":{ + "name":"UpdateChannel", + "http":{ + "method":"POST", + "requestUri":"/UpdateChannel" + }, + "input":{"shape":"UpdateChannelRequest"}, + "output":{"shape":"UpdateChannelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

User does not have sufficient access to perform this action.

" + } + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "BatchError":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ResourceArn", + "documentation":"

Channel ARN.

" + }, + "code":{ + "shape":"errorCode", + "documentation":"

Error code.

" + }, + "message":{ + "shape":"errorMessage", + "documentation":"

Error message, determined by the application.

" + } + }, + "documentation":"

Error related to a specific channel, specified by its ARN.

" + }, + "BatchErrors":{ + "type":"list", + "member":{"shape":"BatchError"} + }, + "BatchGetChannelRequest":{ + "type":"structure", + "required":["arns"], + "members":{ + "arns":{ + "shape":"ChannelArnList", + "documentation":"

Array of ARNs, one per channel.

" + } + } + }, + "BatchGetChannelResponse":{ + "type":"structure", + "members":{ + "channels":{"shape":"Channels"}, + "errors":{ + "shape":"BatchErrors", + "documentation":"

Each error object is related to a specific ARN in the request.

" + } + } + }, + "BatchGetStreamKeyRequest":{ + "type":"structure", + "required":["arns"], + "members":{ + "arns":{ + "shape":"StreamKeyArnList", + "documentation":"

Array of ARNs, one per channel.

" + } + } + }, + "BatchGetStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKeys":{"shape":"StreamKeys"}, + "errors":{"shape":"BatchErrors"} + } + }, + "Channel":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN.

" + }, + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Default: LOW.

" + }, + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. STANDARD: The stream is transcoded; resolution (width, in landscape orientation) can be up to 1080p or the input source resolution, whichever is lower; and bitrate can be up to 8.5 Mbps. BASIC: The stream is transmuxed; resolution can be up to 480p; and bitrate can be up to 1.5 Mbps. Default: STANDARD.

" + }, + "ingestEndpoint":{ + "shape":"IngestEndpoint", + "documentation":"

Channel ingest endpoint, part of the definition of an ingest server, used when you set up streaming software.

" + }, + "playbackUrl":{ + "shape":"PlaybackURL", + "documentation":"

Channel playback URL.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of 1-50 maps, each of the form string:string (key:value).

" + } + }, + "documentation":"

Object specifying a channel.

" + }, + "ChannelArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:channel/[a-zA-Z0-9-]+$" + }, + "ChannelArnList":{ + "type":"list", + "member":{"shape":"ChannelArn"}, + "max":50, + "min":1 + }, + "ChannelLatencyMode":{ + "type":"string", + "enum":[ + "NORMAL", + "LOW" + ] + }, + "ChannelList":{ + "type":"list", + "member":{"shape":"ChannelSummary"} + }, + "ChannelName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^[a-zA-Z0-9-_]*$" + }, + "ChannelNotBroadcasting":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

The stream is offline for the given channel ARN.

" + } + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ChannelSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN.

" + }, + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Default: LOW.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of 1-50 maps, each of the form string:string (key:value).

" + } + }, + "documentation":"

Summary information about a channel.

" + }, + "ChannelType":{ + "type":"string", + "enum":[ + "BASIC", + "STANDARD" + ] + }, + "Channels":{ + "type":"list", + "member":{"shape":"Channel"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

Updating or deleting a resource can cause an inconsistent state.

" + } + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateChannelRequest":{ + "type":"structure", + "members":{ + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Default: LOW.

" + }, + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. STANDARD: The stream is transcoded; resolution (width, in landscape orientation) can be up to 1080p or the input source resolution, whichever is lower; and bitrate can be up to 8.5 Mbps. BASIC: The stream is transmuxed; resolution can be up to 480p; and bitrate can be up to 1.5 Mbps. Default: STANDARD.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

See Channel$tags.

" + } + } + }, + "CreateChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"}, + "streamKey":{"shape":"StreamKey"} + } + }, + "CreateStreamKeyRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel for which to create the stream key.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

See Channel$tags.

" + } + } + }, + "CreateStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKey":{ + "shape":"StreamKey", + "documentation":"

Stream key used to authenticate an RTMP stream for ingestion.

" + } + } + }, + "DeleteChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel to be deleted.

" + } + } + }, + "DeleteStreamKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

ARN of the stream key to be deleted.

" + } + } + }, + "GetChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel for which the configuration is to be retrieved.

" + } + } + }, + "GetChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"} + } + }, + "GetStreamKeyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

ARN for the stream key to be retrieved.

" + } + } + }, + "GetStreamKeyResponse":{ + "type":"structure", + "members":{ + "streamKey":{"shape":"StreamKey"} + } + }, + "GetStreamRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN for stream to be accessed.

" + } + } + }, + "GetStreamResponse":{ + "type":"structure", + "members":{ + "stream":{"shape":"Stream"} + } + }, + "IngestEndpoint":{"type":"string"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

Unexpected error during processing of request.

" + } + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListChannelsRequest":{ + "type":"structure", + "members":{ + "filterByName":{ + "shape":"ChannelName", + "documentation":"

Filters the channel list to match the specified name.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first channel to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxChannelResults", + "documentation":"

Maximum number of channels to return.

" + } + } + }, + "ListChannelsResponse":{ + "type":"structure", + "required":["channels"], + "members":{ + "channels":{ + "shape":"ChannelList", + "documentation":"

List of the matching channels.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more channels than maxResults, use nextToken in the request to get the next set.

" + } + } + }, + "ListStreamKeysRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN used to filter the list.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first stream key to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStreamKeyResults", + "documentation":"

Maximum number of streamKeys to return.

" + } + } + }, + "ListStreamKeysResponse":{ + "type":"structure", + "required":["streamKeys"], + "members":{ + "streamKeys":{ + "shape":"StreamKeyList", + "documentation":"

List of stream keys.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more stream keys than maxResults, use nextToken in the request to get the next set.

" + } + } + }, + "ListStreamsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first stream to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxStreamResults", + "documentation":"

Maximum number of streams to return.

" + } + } + }, + "ListStreamsResponse":{ + "type":"structure", + "required":["streams"], + "members":{ + "streams":{ + "shape":"StreamList", + "documentation":"

List of streams.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more streams than maxResults, use nextToken in the request to get the next set.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The ARN of the resource to be retrieved.

", + "location":"uri", + "locationName":"resourceArn" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The first tag to retrieve. This is used for pagination; see the nextToken response field.

" + }, + "maxResults":{ + "shape":"MaxTagResults", + "documentation":"

Maximum number of tags to return.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{"shape":"Tags"}, + "nextToken":{ + "shape":"String", + "documentation":"

If there are more tags than maxResults, use nextToken in the request to get the next set.

" + } + } + }, + "MaxChannelResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxStreamKeyResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxStreamResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "MaxTagResults":{ + "type":"integer", + "max":50, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":500, + "min":0 + }, + "PlaybackURL":{"type":"string"}, + "PutMetadataRequest":{ + "type":"structure", + "required":[ + "channelArn", + "metadata" + ], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel into which metadata is inserted. This channel must have an active stream.

" + }, + "metadata":{ + "shape":"StreamMetadata", + "documentation":"

Metadata to insert into the stream. Maximum: 1 KB per request.

" + } + } + }, + "ResourceArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:[a-z-]/[a-zA-Z0-9-]+$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

Request references a resource which does not exist.

" + } + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

Request would cause a service quota to be exceeded.

" + } + }, + "error":{"httpStatusCode":402}, + "exception":true + }, + "StopStreamRequest":{ + "type":"structure", + "required":["channelArn"], + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel for which the stream is to be stopped.

" + } + } + }, + "StopStreamResponse":{ + "type":"structure", + "members":{ + } + }, + "Stream":{ + "type":"structure", + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN for the stream.

" + }, + "playbackUrl":{ + "shape":"PlaybackURL", + "documentation":"

URL of the video master manifest, required by the video player to play the HLS stream.

" + }, + "startTime":{ + "shape":"StreamStartTime", + "documentation":"

ISO-8601 formatted timestamp of the stream’s start.

" + }, + "state":{ + "shape":"StreamState", + "documentation":"

The stream’s state.

" + }, + "health":{ + "shape":"StreamHealth", + "documentation":"

The stream’s health.

" + }, + "viewerCount":{ + "shape":"StreamViewerCount", + "documentation":"

Number of current viewers of the stream.

" + } + }, + "documentation":"

Specifies a live video stream that has been ingested and distributed.

" + }, + "StreamHealth":{ + "type":"string", + "enum":[ + "HEALTHY", + "STARVING", + "UNKNOWN" + ] + }, + "StreamKey":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

Stream-key ARN.

" + }, + "value":{ + "shape":"StreamKeyValue", + "documentation":"

Stream-key value.

" + }, + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN for the stream.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of 1-50 maps, each of the form string:string (key:value)

" + } + }, + "documentation":"

Object specifying a stream key.

" + }, + "StreamKeyArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:[is]vs:[a-z0-9-]+:[0-9]+:stream-key/[a-zA-Z0-9-]+$" + }, + "StreamKeyArnList":{ + "type":"list", + "member":{"shape":"StreamKeyArn"}, + "max":50, + "min":1 + }, + "StreamKeyList":{ + "type":"list", + "member":{"shape":"StreamKeySummary"} + }, + "StreamKeySummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"StreamKeyArn", + "documentation":"

Stream-key ARN.

" + }, + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN for the stream.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of 1-50 maps, each of the form string:string (key:value)

" + } + }, + "documentation":"

Summary information about a stream key.

" + }, + "StreamKeyValue":{"type":"string"}, + "StreamKeys":{ + "type":"list", + "member":{"shape":"StreamKey"} + }, + "StreamList":{ + "type":"list", + "member":{"shape":"StreamSummary"} + }, + "StreamMetadata":{"type":"string"}, + "StreamStartTime":{"type":"timestamp"}, + "StreamState":{ + "type":"string", + "enum":[ + "LIVE", + "OFFLINE" + ] + }, + "StreamSummary":{ + "type":"structure", + "members":{ + "channelArn":{ + "shape":"ChannelArn", + "documentation":"

Channel ARN for the stream.

" + }, + "state":{ + "shape":"StreamState", + "documentation":"

The stream’s state.

" + }, + "health":{ + "shape":"StreamHealth", + "documentation":"

The stream’s health.

" + }, + "viewerCount":{ + "shape":"StreamViewerCount", + "documentation":"

Number of current viewers of the stream.

" + }, + "startTime":{ + "shape":"StreamStartTime", + "documentation":"

ISO-8601 formatted timestamp of the stream’s start.

" + } + }, + "documentation":"

Summary information about a stream.

" + }, + "StreamUnavailable":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

The stream is temporarily unavailable.

" + } + }, + "error":{"httpStatusCode":503}, + "exception":true + }, + "StreamViewerCount":{"type":"long"}, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

ARN of the resource for which tags are to be added or updated.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of tags to be added or updated.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

Request was denied due to request throttling.

" + } + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

ARN of the resource for which tags are to be removed.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

Array of tags to be removed.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateChannelRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"

ARN of the channel to be updated.

" + }, + "name":{ + "shape":"ChannelName", + "documentation":"

Channel name.

" + }, + "latencyMode":{ + "shape":"ChannelLatencyMode", + "documentation":"

Channel latency mode. Default: LOW.

" + }, + "type":{ + "shape":"ChannelType", + "documentation":"

Channel type, which determines the allowable resolution and bitrate. STANDARD: The stream is transcoded; resolution (width, in landscape orientation) can be up to 1080p or the input source resolution, whichever is lower; and bitrate can be up to 8.5 Mbps. BASIC: The stream is transmuxed; resolution can be up to 480p; and bitrate can be up to 1.5 Mbps. Default: STANDARD.

" + } + } + }, + "UpdateChannelResponse":{ + "type":"structure", + "members":{ + "channel":{"shape":"Channel"} + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "exceptionMessage":{ + "shape":"errorMessage", + "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

" + } + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "errorCode":{"type":"string"}, + "errorMessage":{"type":"string"} + }, + "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an AWS SNS event stream for responses. JSON is used for both requests and responses, including errors.

The API is an AWS regional service, currently in these regions: us-west-2, us-east-2, and eu-west-1.

All API request parameters and URLs are case sensitive.

For a summary of notable documentation changes in each release, see Document History.

Allowed Header Values

  • Accept: application/json

  • Accept-Encoding: gzip, deflate

  • Content-Type: application/json

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

  • Channel — Stores configuration data related to your live stream. You first create a channel and then use the channel’s stream key to start your live stream. See the Channel endpoints for more information.

  • Stream key — An identifier assigned by Amazon IVS when you create a channel, which is then used to authorize streaming. See the StreamKey endpoints for more information. Treat the stream key like a secret, since it allows anyone to stream to the channel.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels and Stream Keys.

API Endpoints

Channel:

  • CreateChannel — Creates a new channel and an associated stream key to start streaming.

  • GetChannel — Gets the channel configuration for a specified channel ARN (Amazon Resource Name).

  • BatchGetChannel — Performs GetChannel on multiple ARNs simultaneously.

  • ListChannels — Gets summary information about channels. This list can be filtered to match a specified string.

  • UpdateChannel — Updates a channel's configuration. This does not affect an ongoing stream of this channel. You must stop and restart the stream for the changes to take effect.

  • DeleteChannel — Deletes a specified channel.

StreamKey:

  • CreateStreamKey — Creates a stream key, used to initiate a stream, for a specified channel ARN.

  • GetStreamKey — Gets stream key information for the specified ARN.

  • BatchGetStreamKey — Performs GetStreamKey on multiple ARNs simultaneously.

  • ListStreamKeys — Gets a list of stream keys. The list can be filtered to a particular channel.

  • DeleteStreamKey — Deletes the stream key for a specified ARN, so it can no longer be used to stream.

Stream:

  • GetStream — Gets information about the active (live) stream on a specified channel.

  • ListStreams — Gets summary information about live streams.

  • StopStream — Disconnects a streamer on a specified channel. This disconnects the incoming RTMP stream from the client. Can be used in conjunction with DeleteStreamKey to prevent further streaming to a channel.

  • PutMetadata — Inserts metadata into an RTMP stream for a specified channel. A maximum of 5 requests per second per channel is allowed, each with a maximum 1KB payload.

AWS Tags:

  • TagResource — Adds or updates tags for an AWS resource with a specified ARN.

  • UntagResource — Removes tags from a resource with a specified ARN.

  • ListTagsForResource — Gets information about AWS tags for a specified ARN.

" +} diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 6155111ec63d..e7fe6daabec8 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafka/src/main/resources/codegen-resources/service-2.json b/services/kafka/src/main/resources/codegen-resources/service-2.json index b6d0617faecc..08713b05db62 100644 --- a/services/kafka/src/main/resources/codegen-resources/service-2.json +++ b/services/kafka/src/main/resources/codegen-resources/service-2.json @@ -335,6 +335,53 @@ ], "documentation": "\n

A list of brokers that a client application can use to bootstrap.

\n " }, + "GetCompatibleKafkaVersions" : { + "name" : "GetCompatibleKafkaVersions", + "http" : { + "method" : "GET", + "requestUri" : "/v1/compatible-kafka-versions", + "responseCode" : 200 + }, + "input" : { + "shape" : "GetCompatibleKafkaVersionsRequest" + }, + "output" : { + "shape" : "GetCompatibleKafkaVersionsResponse", + "documentation": "\n

Successful response.

\n " + }, + "errors" : + [ + { + "shape" : "BadRequestException", + "documentation" : "\n

The request isn't valid because the input is incorrect. Correct your input and then submit it again.

\n " + }, + { + "shape" : "UnauthorizedException", + "documentation" : "\n

The request is not authorized. The provided credentials couldn't be validated.

\n " + }, + { + "shape" : "InternalServerErrorException", + "documentation" : "\n

There was an unexpected internal server error. Retrying your request might resolve the issue.

\n " + }, + { + "shape" : "ForbiddenException", + "documentation" : "\n

Access forbidden. Check your credentials and then retry your request.

\n " + }, + { + "shape" : "NotFoundException", + "documentation" : "\n

The resource could not be found due to incorrect input. Correct the input, then retry the request.

\n " + }, + { + "shape" : "ServiceUnavailableException", + "documentation" : "\n

503 response

\n " + }, + { + "shape" : "TooManyRequestsException", + "documentation" : "\n

429 response

\n " + } + ], + "documentation": "\n

Gets the Apache Kafka versions to which you can update the MSK cluster.

\n " + }, "ListClusterOperations": { "name": "ListClusterOperations", "http": { @@ -750,6 +797,52 @@ ], "documentation": "\n

Updates the cluster with the configuration that is specified in the request body.

\n " }, + "UpdateClusterKafkaVersion" : { + "name" : "UpdateClusterKafkaVersion", + "http" : { + "method" : "PUT", + "requestUri" : "/v1/clusters/{clusterArn}/version", + "responseCode" : 200 + }, + "input" : { + "shape" : "UpdateClusterKafkaVersionRequest" + }, + "output" : { + "shape" : "UpdateClusterKafkaVersionResponse", + "documentation" : "\n

Successful response.

\n " + }, + "errors" : [ + { + "shape" : "BadRequestException", + "documentation" : "\n

The request isn't valid because the input is incorrect. Correct your input and then submit it again.

\n " + }, + { + "shape" : "UnauthorizedException", + "documentation" : "\n

The request is not authorized. The provided credentials couldn't be validated.

\n " + }, + { + "shape" : "InternalServerErrorException", + "documentation" : "\n

There was an unexpected internal server error. Retrying your request might resolve the issue.

\n " + }, + { + "shape" : "ForbiddenException", + "documentation" : "\n

Access forbidden. Check your credentials and then retry your request.

\n " + }, + { + "shape" : "NotFoundException", + "documentation" : "\n

The resource could not be found due to incorrect input. Correct the input, then retry the request.

\n " + }, + { + "shape" : "ServiceUnavailableException", + "documentation" : "\n

503 response

\n " + }, + { + "shape" : "TooManyRequestsException", + "documentation" : "\n

429 response

\n " + } + ], + "documentation": "\n

Updates the Apache Kafka version for the cluster.

\n " + }, "UpdateMonitoring" : { "name" : "UpdateMonitoring", "http" : { @@ -1101,6 +1194,11 @@ "locationName": "operationState", "documentation": "\n

State of the cluster operation.

\n " }, + "OperationSteps" : { + "shape" : "__listOfClusterOperationStep", + "locationName" : "operationSteps", + "documentation" : "\n

Steps completed during the operation.

\n " + }, "OperationType": { "shape": "__string", "locationName": "operationType", @@ -1119,6 +1217,33 @@ }, "documentation": "\n

Returns information about a cluster operation.

\n " }, + "ClusterOperationStep" : { + "type" : "structure", + "members" : { + "StepInfo" : { + "shape" : "ClusterOperationStepInfo", + "locationName" : "stepInfo", + "documentation" : "\n

Information about the step and its status.

\n " + }, + "StepName" : { + "shape" : "__string", + "locationName" : "stepName", + "documentation" : "\n

The name of the step.

\n " + } + }, + "documentation" : "\n

Step taken during a cluster operation.

\n " + }, + "ClusterOperationStepInfo" : { + "type" : "structure", + "members" : { + "StepStatus" : { + "shape" : "__string", + "locationName" : "stepStatus", + "documentation" : "\n

The step's current status.

\n " + } + }, + "documentation" : "\n

State information about the operation step.

\n " + }, "ClusterState": { "type": "string", "documentation": "\n

The state of a Kafka cluster.

\n ", @@ -1130,6 +1255,22 @@ "FAILED" ] }, + "CompatibleKafkaVersion" : { + "type" : "structure", + "members" : { + "SourceVersion" : { + "shape" : "__string", + "locationName" : "sourceVersion", + "documentation": "\n

A Kafka version.

\n " + }, + "TargetVersions" : { + "shape" : "__listOf__string", + "locationName" : "targetVersions", + "documentation": "\n

A list of Kafka versions.

\n " + } + }, + "documentation": "\n

Contains source Kafka versions and compatible target Kafka versions.

\n " + }, "Configuration": { "type": "structure", "members": { @@ -1350,7 +1491,6 @@ }, "required": [ "ServerProperties", - "KafkaVersions", "Name" ] }, @@ -1723,6 +1863,27 @@ } } }, + "GetCompatibleKafkaVersionsRequest" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "clusterArn", + "documentation": "\n

The Amazon Resource Name (ARN) of the cluster to check.

\n " + } + } + }, + "GetCompatibleKafkaVersionsResponse" : { + "type" : "structure", + "members" : { + "CompatibleKafkaVersions" : { + "shape" : "__listOfCompatibleKafkaVersion", + "locationName" : "compatibleKafkaVersions", + "documentation": "\n

A list of CompatibleKafkaVersion objects.

\n " + } + } + }, "InternalServerErrorException": { "type": "structure", "members": { @@ -2053,6 +2214,11 @@ "locationName" : "openMonitoring", "documentation" : "\n

The settings for open monitoring.

\n " }, + "KafkaVersion" : { + "shape" : "__string", + "locationName" : "kafkaVersion", + "documentation" : "\n

The Kafka version.

\n " + }, "LoggingInfo": { "shape": "LoggingInfo", "locationName": "loggingInfo" @@ -2504,6 +2670,48 @@ } } }, + "UpdateClusterKafkaVersionRequest" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "clusterArn", + "documentation" : "\n

The Amazon Resource Name (ARN) of the cluster to be updated.

\n " + }, + "ConfigurationInfo" : { + "shape" : "ConfigurationInfo", + "locationName" : "configurationInfo", + "documentation": "\n

The custom configuration that should be applied to the new version of the cluster.

\n " + }, + "CurrentVersion" : { + "shape" : "__string", + "locationName" : "currentVersion", + "documentation": "\n

Current cluster version.

\n " + }, + "TargetKafkaVersion" : { + "shape" : "__string", + "locationName" : "targetKafkaVersion" + ,"documentation": "\n

Target Kafka version.

\n " + } + }, + "required" : [ "ClusterArn", "TargetKafkaVersion", "CurrentVersion" ] + }, + "UpdateClusterKafkaVersionResponse" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "locationName" : "clusterArn", + "documentation": "\n

The Amazon Resource Name (ARN) of the cluster.

\n " + }, + "ClusterOperationArn" : { + "shape" : "__string", + "locationName" : "clusterOperationArn", + "documentation": "\n

The Amazon Resource Name (ARN) of the cluster operation.

\n " + } + } + }, "UpdateMonitoringRequest" : { "type" : "structure", "members" : { @@ -2622,6 +2830,18 @@ "shape": "ClusterOperationInfo" } }, + "__listOfClusterOperationStep" : { + "type" : "list", + "member" : { + "shape" : "ClusterOperationStep" + } + }, + "__listOfCompatibleKafkaVersion" : { + "type" : "list", + "member" : { + "shape" : "CompatibleKafkaVersion" + } + }, "__listOfConfiguration": { "type": "list", "member": { diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index fb74d1a6eaee..bb599200cf1d 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendra/src/main/resources/codegen-resources/service-2.json b/services/kendra/src/main/resources/codegen-resources/service-2.json index 889b81c83d4d..893bba613058 100644 --- a/services/kendra/src/main/resources/codegen-resources/service-2.json +++ b/services/kendra/src/main/resources/codegen-resources/service-2.json @@ -109,6 +109,23 @@ ], "documentation":"

Creates a new Amazon Kendra index. Index creation is an asynchronous operation. To determine if index creation has completed, check the Status field returned from a call to . The Status field is set to ACTIVE when the index is ready to use.

Once the index is active you can index your documents using the operation or using one of the supported data sources.

" }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataSourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an Amazon Kendra data source. An exception is not thrown if the data source is already being deleted. While the data source is being deleted, the Status field returned by a call to the operation is set to DELETING. For more information, see Deleting Data Sources.

" + }, "DeleteFaq":{ "name":"DeleteFaq", "http":{ @@ -262,6 +279,23 @@ ], "documentation":"

Lists the Amazon Kendra indexes that you have created.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.

" + }, "Query":{ "name":"Query", "http":{ @@ -276,6 +310,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], "documentation":"

Searches an active index. Use this API to search your documents using a query. The Query operation enables you to do faceted search and to filter results based on document attributes.

It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.

Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.

  • Relevant passages

  • Matching FAQs

  • Relevant documents

You can specify that the query return only one type of result using the QueryResultTypeConfig parameter.

" @@ -332,6 +367,40 @@ ], "documentation":"

Enables you to provide feedback to Amazon Kendra to improve the performance of the service.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes a tag from an index, FAQ, or a data source.

" + }, "UpdateDataSource":{ "name":"UpdateDataSource", "http":{ @@ -362,6 +431,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], "documentation":"

Updates an existing Amazon Kendra index.

" @@ -407,18 +477,18 @@ "members":{ "Key":{ "shape":"String", - "documentation":"

" + "documentation":"

The key that identifies the attribute.

" }, "ValueType":{ "shape":"AdditionalResultAttributeValueType", - "documentation":"

" + "documentation":"

The data type of the Value property.

" }, "Value":{ "shape":"AdditionalResultAttributeValue", - "documentation":"

" + "documentation":"

An object that contains the attribute value.

" } }, - "documentation":"

" + "documentation":"

An attribute returned from an index query.

" }, "AdditionalResultAttributeList":{ "type":"list", @@ -438,6 +508,11 @@ "type":"string", "enum":["TEXT_WITH_HIGHLIGHTS_VALUE"] }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "AttributeFilter":{ "type":"structure", "members":{ @@ -459,11 +534,11 @@ }, "ContainsAll":{ "shape":"DocumentAttribute", - "documentation":"

Returns true when a document contains all of the specified document attributes.

" + "documentation":"

Returns true when a document contains all of the specified document attributes. This filter is only applicable to StringListValue metadata.

" }, "ContainsAny":{ "shape":"DocumentAttribute", - "documentation":"

Returns true when a document contains any of the specified document attributes.

" + "documentation":"

Returns true when a document contains any of the specified document attributes. This filter is only applicable to StringListValue metadata.

" }, "GreaterThan":{ "shape":"DocumentAttribute", @@ -482,13 +557,11 @@ "documentation":"

Performs a less than or equals operation on two document attributes. Use with a document attribute of type Integer or Long.

" } }, - "documentation":"

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use a total of 3 layers. For example, you can use:

  1. <AndAllFilters>

  2. <OrAllFilters>

  3. <EqualTo>

" + "documentation":"

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

" }, "AttributeFilterList":{ "type":"list", - "member":{"shape":"AttributeFilter"}, - "max":5, - "min":1 + "member":{"shape":"AttributeFilter"} }, "BatchDeleteDocumentRequest":{ "type":"structure", @@ -504,7 +577,8 @@ "DocumentIdList":{ "shape":"DocumentIdList", "documentation":"

One or more identifiers for documents to delete from the index.

" - } + }, + "DataSourceSyncJobMetricTarget":{"shape":"DataSourceSyncJobMetricTarget"} } }, "BatchDeleteDocumentResponse":{ @@ -555,7 +629,7 @@ }, "Documents":{ "shape":"DocumentList", - "documentation":"

One or more documents to add to the index.

Each document is limited to 5 Mb, the total size of the list is limited to 50 Mb.

" + "documentation":"

One or more documents to add to the index.

Documents have the following file size limits.

  • 5 MB total size for inline documents

  • 50 MB total size for files from an S3 bucket

  • 5 MB extracted text for any file

For more information about file size and transaction per second quotas, see Quotas.

" } } }, @@ -564,7 +638,7 @@ "members":{ "FailedDocuments":{ "shape":"BatchPutDocumentResponseFailedDocuments", - "documentation":"

A list of documents that were not added to the index because the document failed a validation check. Each document contains an error message that indicates why the document couldn't be added to the index.

If there was an error adding a document to an index the error is reported in your AWS CloudWatch log.

" + "documentation":"

A list of documents that were not added to the index because the document failed a validation check. Each document contains an error message that indicates why the document couldn't be added to the index.

If there was an error adding a document to an index the error is reported in your AWS CloudWatch log. For more information, see Monitoring Amazon Kendra with Amazon CloudWatch Logs

" } } }, @@ -590,12 +664,26 @@ "type":"list", "member":{"shape":"BatchPutDocumentResponseFailedDocument"} }, - "Blob":{ - "type":"blob", - "max":153600, - "min":1 - }, + "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, + "CapacityUnitsConfiguration":{ + "type":"structure", + "required":[ + "StorageCapacityUnits", + "QueryCapacityUnits" + ], + "members":{ + "StorageCapacityUnits":{ + "shape":"StorageCapacityUnit", + "documentation":"

The amount of extra storage capacity for an index. Each capacity unit provides 150 Gb of storage space or 500,000 documents, whichever is reached first.

" + }, + "QueryCapacityUnits":{ + "shape":"QueryCapacityUnit", + "documentation":"

The amount of extra query capacity for an index. Each capacity unit provides 0.5 queries per second and 40,000 queries per day.

" + } + }, + "documentation":"

Specifies capacity units configured for your index. You can add and remove capacity units to tune an index to your requirements.

" + }, "ChangeDetectingColumns":{ "type":"list", "member":{"shape":"ColumnName"}, @@ -615,7 +703,7 @@ }, "ClickTime":{ "shape":"Timestamp", - "documentation":"

The Unix timestamp of the data and time that the result was clicked.

" + "documentation":"

The Unix timestamp of the date and time that the result was clicked.

" } }, "documentation":"

Gathers information about when a particular result was clicked by a user. Your application uses the SubmitFeedback operation to provide click information.

" @@ -754,6 +842,10 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source. For more information, see IAM Roles for Amazon Kendra.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs that identify the data source. You can use the tags to identify and organize your resources and to control access to resources.

" } } }, @@ -795,6 +887,10 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the S3 bucket that contains the FAQs. For more information, see IAM Roles for Amazon Kendra.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs that identify the FAQ. You can use the tags to identify and organize your resources and to control access to resources.

" } } }, @@ -818,6 +914,10 @@ "shape":"IndexName", "documentation":"

The name for the new index.

" }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed.

" + }, "RoleArn":{ "shape":"RoleArn", "documentation":"

An IAM role that gives Amazon Kendra permissions to access your Amazon CloudWatch logs and metrics. This is also the role used when you use the BatchPutDocument operation to index documents from an Amazon S3 bucket.

" @@ -834,6 +934,10 @@ "shape":"ClientTokenName", "documentation":"

A token that you provide to identify the request to create an index. Multiple calls to the CreateIndex operation with the same client token will create only one index.

", "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs that identify the index. You can use the tags to identify and organize your resources and to control access to resources.

" } } }, @@ -860,6 +964,18 @@ "DatabaseConfiguration":{ "shape":"DatabaseConfiguration", "documentation":"

Provides information necessary to create a connector for a database.

" + }, + "SalesforceConfiguration":{ + "shape":"SalesforceConfiguration", + "documentation":"

Provides configuration information for data sources that connect to a Salesforce site.

" + }, + "OneDriveConfiguration":{ + "shape":"OneDriveConfiguration", + "documentation":"

Provides configuration for data sources that connect to Microsoft OneDrive.

" + }, + "ServiceNowConfiguration":{ + "shape":"ServiceNowConfiguration", + "documentation":"

Provides configuration for data sources that connect to ServiceNow instances.

" } }, "documentation":"

Configuration information for a Amazon Kendra data source.

" @@ -867,7 +983,8 @@ "DataSourceDateFieldFormat":{ "type":"string", "max":40, - "min":4 + "min":4, + "pattern":"^(?!\\s).*(?If the reason that the synchronization failed is due to an error with the underlying data source, this field contains a code that identifies the error.

" + }, + "Metrics":{ + "shape":"DataSourceSyncJobMetrics", + "documentation":"

Maps a batch delete document request to a specific data source sync job. This is optional and should only be supplied when documents are deleted by a connector.

" } }, "documentation":"

Provides information about a synchronization job.

" @@ -980,6 +1101,56 @@ "type":"list", "member":{"shape":"DataSourceSyncJob"} }, + "DataSourceSyncJobId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "DataSourceSyncJobMetricTarget":{ + "type":"structure", + "required":[ + "DataSourceId", + "DataSourceSyncJobId" + ], + "members":{ + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

The ID of the data source that is running the sync job.

" + }, + "DataSourceSyncJobId":{ + "shape":"DataSourceSyncJobId", + "documentation":"

The ID of the sync job that is running on the data source.

" + } + }, + "documentation":"

Maps a particular data source sync job to a particular data source.

" + }, + "DataSourceSyncJobMetrics":{ + "type":"structure", + "members":{ + "DocumentsAdded":{ + "shape":"MetricValue", + "documentation":"

The number of documents added from the data source up to now in the data source sync.

" + }, + "DocumentsModified":{ + "shape":"MetricValue", + "documentation":"

The number of documents modified in the data source up to now in the data source sync run.

" + }, + "DocumentsDeleted":{ + "shape":"MetricValue", + "documentation":"

The number of documents deleted from the data source up to now in the data source sync run.

" + }, + "DocumentsFailed":{ + "shape":"MetricValue", + "documentation":"

The number of documents that failed to sync from the data source up to now in the data source sync run.

" + }, + "DocumentsScanned":{ + "shape":"MetricValue", + "documentation":"

The current number of documents crawled by the current sync job in the data source.

" + } + }, + "documentation":"

Maps a batch delete document request to a specific data source sync job. This is optional and should only be supplied when documents are deleted by a connector.

" + }, "DataSourceSyncJobStatus":{ "type":"string", "enum":[ @@ -988,7 +1159,8 @@ "SYNCING", "INCOMPLETE", "STOPPING", - "ABORTED" + "ABORTED", + "SYNCING_INDEXING" ] }, "DataSourceToIndexFieldMapping":{ @@ -1024,7 +1196,10 @@ "enum":[ "S3", "SHAREPOINT", - "DATABASE" + "DATABASE", + "SALESFORCE", + "ONEDRIVE", + "SERVICENOW" ] }, "DataSourceVpcConfiguration":{ @@ -1098,6 +1273,23 @@ "max":65535, "min":1 }, + "DeleteDataSourceRequest":{ + "type":"structure", + "required":[ + "Id", + "IndexId" + ], + "members":{ + "Id":{ + "shape":"DataSourceId", + "documentation":"

The unique identifier of the data source to delete.

" + }, + "IndexId":{ + "shape":"IndexId", + "documentation":"

The unique identifier of the index associated with the data source.

" + } + } + }, "DeleteFaqRequest":{ "type":"structure", "required":[ @@ -1275,6 +1467,10 @@ "shape":"IndexId", "documentation":"

The name of the index.

" }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

The Amazon Kendra edition used for the index. You decide the edition when you create the index.

" + }, "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) of the IAM role that gives Amazon Kendra permission to write to your Amazon Cloudwatch logs.

" @@ -1310,6 +1506,10 @@ "ErrorMessage":{ "shape":"ErrorMessage", "documentation":"

When the Status field value is FAILED, the ErrorMessage field contains a message that explains why.

" + }, + "CapacityUnits":{ + "shape":"CapacityUnitsConfiguration", + "documentation":"

For enterprise edition indexes, you can choose to use additional capacity to meet the needs of your application. This contains the capacity units used for the index. A 0 for the query capacity or the storage capacity indicates that the index is using the default capacity for the index.

" } } }, @@ -1383,15 +1583,11 @@ }, "DocumentAttributeList":{ "type":"list", - "member":{"shape":"DocumentAttribute"}, - "max":100, - "min":1 + "member":{"shape":"DocumentAttribute"} }, "DocumentAttributeStringListValue":{ "type":"list", - "member":{"shape":"String"}, - "max":5, - "min":1 + "member":{"shape":"String"} }, "DocumentAttributeStringValue":{ "type":"string", @@ -1539,7 +1735,7 @@ "documentation":"

The unique key for the document attribute.

" } }, - "documentation":"

Information a document attribute

" + "documentation":"

Information about a document attribute

" }, "FacetList":{ "type":"list", @@ -1673,6 +1869,10 @@ "shape":"IndexId", "documentation":"

A unique identifier for the index. Use this to identify the index when you are using operations such as Query, DescribeIndex, UpdateIndex, and DeleteIndex.

" }, + "Edition":{ + "shape":"IndexEdition", + "documentation":"

Indicates whether the index is an enterprise edition index or a developer edition index.

" + }, "CreatedAt":{ "shape":"Timestamp", "documentation":"

The Unix timestamp when the index was created.

" @@ -1692,6 +1892,13 @@ "type":"list", "member":{"shape":"IndexConfigurationSummary"} }, + "IndexEdition":{ + "type":"string", + "enum":[ + "DEVELOPER_EDITION", + "ENTERPRISE_EDITION" + ] + }, "IndexFieldName":{ "type":"string", "max":30, @@ -1735,6 +1942,7 @@ "ACTIVE", "DELETING", "FAILED", + "UPDATING", "SYSTEM_UPDATING" ] }, @@ -1742,6 +1950,10 @@ "type":"integer", "min":0 }, + "IndexedTextBytes":{ + "type":"long", + "min":0 + }, "IndexedTextDocumentsCount":{ "type":"integer", "min":0 @@ -1896,6 +2108,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to get a list of tags for.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags associated with the index, FAQ, or data source.

" + } + } + }, "Long":{"type":"long"}, "MaxResultsIntegerForListDataSourceSyncJobsRequest":{ "type":"integer", @@ -1917,11 +2148,76 @@ "max":100, "min":1 }, + "MetricValue":{ + "type":"string", + "pattern":"(([1-9][0-9]*)|0)" + }, "NextToken":{ "type":"string", "max":800, "min":1 }, + "OneDriveConfiguration":{ + "type":"structure", + "required":[ + "TenantDomain", + "SecretArn", + "OneDriveUsers" + ], + "members":{ + "TenantDomain":{ + "shape":"TenantDomain", + "documentation":"

The Azure Active Directory domain of the organization.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the user name and password to connect to OneDrive. The user name should be the application ID for the OneDrive application, and the password is the application key for the OneDrive application.

" + }, + "OneDriveUsers":{ + "shape":"OneDriveUsers", + "documentation":"

A list of user accounts whose documents should be indexed.

" + }, + "InclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns. Documents that match the pattern are included in the index. Documents that don't match the pattern are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

The exclusion pattern is applied to the file name.

" + }, + "ExclusionPatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

List of regular expressions applied to documents. Items that match the exclusion pattern are not indexed. If you provide both an inclusion pattern and an exclusion pattern, any item that matches the exclusion pattern isn't indexed.

The exclusion pattern is applied to the file name.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

A list of DataSourceToIndexFieldMapping objects that map Microsoft OneDrive fields to custom fields in the Amazon Kendra index. You must first create the index fields before you map OneDrive fields.

" + } + }, + "documentation":"

Provides configuration information for data sources that connect to OneDrive.

" + }, + "OneDriveUser":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^(?!\\s).+@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$" + }, + "OneDriveUserList":{ + "type":"list", + "member":{"shape":"OneDriveUser"}, + "max":100, + "min":1 + }, + "OneDriveUsers":{ + "type":"structure", + "members":{ + "OneDriveUserList":{ + "shape":"OneDriveUserList", + "documentation":"

A list of users whose documents should be indexed. Specify the user names in email format, for example, username@tenantdomain. If you need to index the documents of more than 100 users, use the OneDriveUserS3Path field to specify the location of a file containing a list of users.

" + }, + "OneDriveUserS3Path":{ + "shape":"S3Path", + "documentation":"

The S3 bucket location of a file containing a list of users whose documents should be indexed.

" + } + }, + "documentation":"

User accounts whose documents should be indexed.

" + }, "Order":{ "type":"string", "enum":[ @@ -1954,9 +2250,7 @@ }, "PrincipalList":{ "type":"list", - "member":{"shape":"Principal"}, - "max":200, - "min":1 + "member":{"shape":"Principal"} }, "PrincipalName":{ "type":"string", @@ -1971,6 +2265,10 @@ "GROUP" ] }, + "QueryCapacityUnit":{ + "type":"integer", + "min":0 + }, "QueryId":{ "type":"string", "max":36, @@ -2013,7 +2311,7 @@ }, "PageSize":{ "shape":"Integer", - "documentation":"

Sets the number of results that are returned in each page of results. The default page size is 100.

" + "documentation":"

Sets the number of results that are returned in each page of results. The default page size is 10. The maximum number of results returned is 100. If you ask for more than 100 results, only 100 are returned.

" } } }, @@ -2051,7 +2349,7 @@ }, "AdditionalAttributes":{ "shape":"AdditionalResultAttributeList", - "documentation":"

" + "documentation":"

One or more additional attributes associated with the query result.

" }, "DocumentId":{ "shape":"DocumentId", @@ -2219,7 +2517,7 @@ }, "ExclusionPatterns":{ "shape":"DataSourceInclusionsExclusionsStrings", - "documentation":"

A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix also matches an exclusion pattern, the document is not indexed.

For more information about glob patterns, see glob (programming) in Wikipedia.

" + "documentation":"

A list of glob patterns for documents that should not be indexed. If a document that matches an inclusion prefix also matches an exclusion pattern, the document is not indexed.

For more information about glob patterns, see glob (programming) in Wikipedia.

" }, "DocumentsMetadataConfiguration":{"shape":"DocumentsMetadataConfiguration"}, "AccessControlListConfiguration":{ @@ -2252,6 +2550,246 @@ }, "documentation":"

Information required to find a specific file in an Amazon S3 bucket.

" }, + "SalesforceChatterFeedConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the column in the Salesforce FeedItem table that contains the content to index. Typically this is the Body column.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the column in the Salesforce FeedItem table that contains the title of the document. This is typically the Title column.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

Maps fields from a Salesforce chatter feed into Amazon Kendra index fields.

" + }, + "IncludeFilterTypes":{ + "shape":"SalesforceChatterFeedIncludeFilterTypes", + "documentation":"

Filters the documents in the feed based on status of the user. When you specify ACTIVE_USERS only documents from users who have an active account are indexed. When you specify STANDARD_USER only documents for Salesforce standard users are documented. You can specify both.

" + } + }, + "documentation":"

Defines configuration for syncing a Salesforce chatter feed. The contents of the object comes from the Salesforce FeedItem table.

" + }, + "SalesforceChatterFeedIncludeFilterType":{ + "type":"string", + "enum":[ + "ACTIVE_USER", + "STANDARD_USER" + ] + }, + "SalesforceChatterFeedIncludeFilterTypes":{ + "type":"list", + "member":{"shape":"SalesforceChatterFeedIncludeFilterType"}, + "max":2, + "min":1 + }, + "SalesforceConfiguration":{ + "type":"structure", + "required":[ + "ServerUrl", + "SecretArn" + ], + "members":{ + "ServerUrl":{ + "shape":"Url", + "documentation":"

The instance URL for the Salesforce site that you want to index.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Secrets Manager secret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:

  • authenticationUrl - The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token.

  • consumerKey - The application public key generated when you created your Salesforce application.

  • consumerSecret - The application private key generated when you created your Salesforce application.

  • password - The password associated with the user logging in to the Salesforce instance.

  • securityToken - The token associated with the user account logging in to the Salesforce instance.

  • username - The user name of the user logging in to the Salesforce instance.

" + }, + "StandardObjectConfigurations":{ + "shape":"SalesforceStandardObjectConfigurationList", + "documentation":"

Specifies the Salesforce standard objects that Amazon Kendra indexes.

" + }, + "KnowledgeArticleConfiguration":{ + "shape":"SalesforceKnowledgeArticleConfiguration", + "documentation":"

Specifies configuration information for the knowledge article types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge articles and the standard fields of knowledge articles, or the custom fields of custom knowledge articles, but not both.

" + }, + "ChatterFeedConfiguration":{ + "shape":"SalesforceChatterFeedConfiguration", + "documentation":"

Specifies configuration information for Salesforce chatter feeds.

" + }, + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

Indicates whether Amazon Kendra should index attachments to Salesforce objects.

" + }, + "StandardObjectAttachmentConfiguration":{ + "shape":"SalesforceStandardObjectAttachmentConfiguration", + "documentation":"

Provides configuration information for processing attachments to Salesforce standard objects.

" + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

The regex is applied to the name of the attached file.

" + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the name of the attached file.

" + } + }, + "documentation":"

Provides configuration information for connecting to a Salesforce data source.

" + }, + "SalesforceCustomKnowledgeArticleTypeConfiguration":{ + "type":"structure", + "required":[ + "Name", + "DocumentDataFieldName" + ], + "members":{ + "Name":{ + "shape":"SalesforceCustomKnowledgeArticleTypeName", + "documentation":"

The name of the configuration.

" + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field in the custom knowledge article that contains the document data to index.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field in the custom knowledge article that contains the document title.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

One or more objects that map fields in the custom knowledge article to fields in the Amazon Kendra index.

" + } + }, + "documentation":"

Provides configuration information for indexing Salesforce custom articles.

" + }, + "SalesforceCustomKnowledgeArticleTypeConfigurationList":{ + "type":"list", + "member":{"shape":"SalesforceCustomKnowledgeArticleTypeConfiguration"}, + "max":10, + "min":1 + }, + "SalesforceCustomKnowledgeArticleTypeName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" + }, + "SalesforceKnowledgeArticleConfiguration":{ + "type":"structure", + "required":["IncludedStates"], + "members":{ + "IncludedStates":{ + "shape":"SalesforceKnowledgeArticleStateList", + "documentation":"

Specifies the document states that should be included when Amazon Kendra indexes knowledge articles. You must specify at least one state.

" + }, + "StandardKnowledgeArticleTypeConfiguration":{ + "shape":"SalesforceStandardKnowledgeArticleTypeConfiguration", + "documentation":"

Provides configuration information for standard Salesforce knowledge articles.

" + }, + "CustomKnowledgeArticleTypeConfigurations":{ + "shape":"SalesforceCustomKnowledgeArticleTypeConfigurationList", + "documentation":"

Provides configuration information for custom Salesforce knowledge articles.

" + } + }, + "documentation":"

Specifies configuration information for the knowledge article types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge articles and the standard fields of knowledge articles, or the custom fields of custom knowledge articles, but not both.

" + }, + "SalesforceKnowledgeArticleState":{ + "type":"string", + "enum":[ + "DRAFT", + "PUBLISHED", + "ARCHIVED" + ] + }, + "SalesforceKnowledgeArticleStateList":{ + "type":"list", + "member":{"shape":"SalesforceKnowledgeArticleState"}, + "max":3, + "min":1 + }, + "SalesforceStandardKnowledgeArticleTypeConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field that contains the document data to index.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field that contains the document title.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

One or more objects that map fields in the knowledge article to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.

" + } + }, + "documentation":"

Provides configuration information for standard Salesforce knowledge articles.

" + }, + "SalesforceStandardObjectAttachmentConfiguration":{ + "type":"structure", + "members":{ + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field used for the document title.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

One or more objects that map fields in attachments to Amazon Kendra index fields.

" + } + }, + "documentation":"

Provides configuration information for processing attachments to Salesforce standard objects.

" + }, + "SalesforceStandardObjectConfiguration":{ + "type":"structure", + "required":[ + "Name", + "DocumentDataFieldName" + ], + "members":{ + "Name":{ + "shape":"SalesforceStandardObjectName", + "documentation":"

The name of the standard object.

" + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field in the standard object table that contains the document contents.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the field in the standard object table that contains the document title.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

One or more objects that map fields in the standard object to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.

" + } + }, + "documentation":"

Specifies configuration information for indexing a single standard object.

" + }, + "SalesforceStandardObjectConfigurationList":{ + "type":"list", + "member":{"shape":"SalesforceStandardObjectConfiguration"}, + "max":17, + "min":1 + }, + "SalesforceStandardObjectName":{ + "type":"string", + "enum":[ + "ACCOUNT", + "CAMPAIGN", + "CASE", + "CONTACT", + "CONTRACT", + "DOCUMENT", + "GROUP", + "IDEA", + "LEAD", + "OPPORTUNITY", + "PARTNER", + "PRICEBOOK", + "PRODUCT", + "PROFILE", + "SOLUTION", + "TASK", + "USER" + ] + }, "ScanSchedule":{"type":"string"}, "Search":{ "type":"structure", @@ -2293,6 +2831,112 @@ }, "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric CMKs.

" }, + "ServiceNowBuildVersionType":{ + "type":"string", + "enum":[ + "LONDON", + "OTHERS" + ] + }, + "ServiceNowConfiguration":{ + "type":"structure", + "required":[ + "HostUrl", + "SecretArn", + "ServiceNowBuildVersion" + ], + "members":{ + "HostUrl":{ + "shape":"ServiceNowHostUrl", + "documentation":"

The ServiceNow instance that the data source connects to. The host endpoint should look like the following: {instance}.service-now.com.

" + }, + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Secret Manager secret that contains the user name and password required to connect to the ServiceNow instance.

" + }, + "ServiceNowBuildVersion":{ + "shape":"ServiceNowBuildVersionType", + "documentation":"

The identifier of the release that the ServiceNow host is running. If the host is not running the LONDON release, use OTHERS.

" + }, + "KnowledgeArticleConfiguration":{ + "shape":"ServiceNowKnowledgeArticleConfiguration", + "documentation":"

Provides configuration information for crawling knowledge articles in the ServiceNow site.

" + }, + "ServiceCatalogConfiguration":{ + "shape":"ServiceNowServiceCatalogConfiguration", + "documentation":"

Provides configuration information for crawling service catalogs in the ServiceNow site.

" + } + }, + "documentation":"

Provides configuration information required to connect to a ServiceNow data source.

" + }, + "ServiceNowHostUrl":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(?!(^(https?|ftp|file):\\/\\/))[a-z0-9-]+(\\.service-now\\.com)$" + }, + "ServiceNowKnowledgeArticleConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

Indicates whether Amazon Kendra should index attachments to knowledge articles.

" + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

List of regular expressions applied to knowledge articles. Items that don't match the inclusion pattern are not indexed. The regex is applied to the field specified in the PatternTargetField.

" + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

List of regular expressions applied to knowledge articles. Items that match the exclusion pattern are not indexed. The regex is applied to the field specified in the PatternTargetField.

" + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the ServiceNow field that is mapped to the index document contents field in the Amazon Kendra index.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the ServiceNow field that is mapped to the index document title field.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

" + } + }, + "documentation":"

Provides configuration information for crawling knowledge articles in the ServiceNow site.

" + }, + "ServiceNowServiceCatalogConfiguration":{ + "type":"structure", + "required":["DocumentDataFieldName"], + "members":{ + "CrawlAttachments":{ + "shape":"Boolean", + "documentation":"

Indicates whether Amazon Kendra should crawl attachments to the service catalog items.

" + }, + "IncludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

Determines the types of file attachments that are included in the index.

" + }, + "ExcludeAttachmentFilePatterns":{ + "shape":"DataSourceInclusionsExclusionsStrings", + "documentation":"

Determines the types of file attachments that are excluded from the index.

" + }, + "DocumentDataFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the ServiceNow field that is mapped to the index document contents field in the Amazon Kendra index.

" + }, + "DocumentTitleFieldName":{ + "shape":"DataSourceFieldName", + "documentation":"

The name of the ServiceNow field that is mapped to the index document title field.

" + }, + "FieldMappings":{ + "shape":"DataSourceToIndexFieldMappingList", + "documentation":"

Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

" + } + }, + "documentation":"

Provides configuration information for crawling service catalog items in the ServiceNow site

" + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -2335,7 +2979,7 @@ }, "ExclusionPatterns":{ "shape":"DataSourceInclusionsExclusionsStrings", - "documentation":"

A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

" + "documentation":"

A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

" }, "VpcConfiguration":{"shape":"DataSourceVpcConfiguration"}, "FieldMappings":{ @@ -2402,6 +3046,10 @@ } } }, + "StorageCapacityUnit":{ + "type":"integer", + "min":0 + }, "String":{ "type":"string", "max":2048, @@ -2450,13 +3098,88 @@ "min":1, "pattern":"^[a-zA-Z][a-zA-Z0-9_]*$" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value associated with the tag. The value may be an empty string but it can't be null.

" + } + }, + "documentation":"

A list of key/value pairs that identify an index, FAQ, or data source. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to tag.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag keys to add to the index, FAQ, or data source. If a tag already exists, the existing value is replaced with the new value.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TenantDomain":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^([a-zA-Z0-9]+(-[a-zA-Z0-9]+)*\\.)+[a-z]{2,}$" + }, "TextDocumentStatistics":{ "type":"structure", - "required":["IndexedTextDocumentsCount"], + "required":[ + "IndexedTextDocumentsCount", + "IndexedTextBytes" + ], "members":{ "IndexedTextDocumentsCount":{ "shape":"IndexedTextDocumentsCount", "documentation":"

The number of text documents indexed.

" + }, + "IndexedTextBytes":{ + "shape":"IndexedTextBytes", + "documentation":"

The total size, in bytes, of the indexed documents.

" } }, "documentation":"

Provides information about text documents indexed in an index.

" @@ -2498,10 +3221,28 @@ "documentation":"

Provides a range of time.

" }, "Timestamp":{"type":"timestamp"}, - "Title":{ - "type":"string", - "max":1024, - "min":1 + "Title":{"type":"string"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to remove the tag from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of tag keys to remove from the index, FAQ, or data source. If a tag key does not exist on the resource, it is ignored.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } }, "UpdateDataSourceRequest":{ "type":"structure", @@ -2560,6 +3301,10 @@ "DocumentMetadataConfigurationUpdates":{ "shape":"DocumentMetadataConfigurationList", "documentation":"

The document metadata to update.

" + }, + "CapacityUnits":{ + "shape":"CapacityUnitsConfiguration", + "documentation":"

Sets the number of additional storage and query capacity units that should be used by the index. You can change the capacity of the index up to 5 times per day.

If you are using extra storage units, you can't reduce the storage capacity below that required to meet the storage needs for your index.

" } } }, @@ -2567,7 +3312,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(https?|ftp|file):\\/\\/(.*)" + "pattern":"^(https?|ftp|file):\\/\\/([^\\s]*)" }, "ValidationException":{ "type":"structure", diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 1ea15c5b149c..1b30343c0849 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/SubscribeToShardUnmarshallingTest.java b/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/SubscribeToShardUnmarshallingTest.java index f1d20617c8aa..b287ef83a754 100644 --- a/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/SubscribeToShardUnmarshallingTest.java +++ b/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/SubscribeToShardUnmarshallingTest.java @@ -29,6 +29,7 @@ import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; +import java.util.concurrent.atomic.AtomicInteger; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -160,6 +161,39 @@ public void eventWithRecords_UnmarshalledCorrectly() throws Throwable { assertThat(events).containsOnly(event); } + @Test + public void unknownEventType_UnmarshalledCorrectly() throws Throwable { + AbortableInputStream content = new MessageWriter() + .writeInitialResponse(new byte[0]) + .writeEvent("ExampleUnknownEventType", "{\"Foo\": \"Bar\"}") + .toInputStream(); + + stubResponse(SdkHttpFullResponse.builder() + .statusCode(200) + .content(content) + .build()); + + AtomicInteger unknownEvents = new AtomicInteger(0); + AtomicInteger knownEvents = new AtomicInteger(0); + + client.subscribeToShard(SubscribeToShardRequest.builder().build(), + 
SubscribeToShardResponseHandler.builder().subscriber(new SubscribeToShardResponseHandler.Visitor() { + @Override + public void visitDefault(SubscribeToShardEventStream event) { + unknownEvents.incrementAndGet(); + } + + @Override + public void visit(SubscribeToShardEvent event) { + knownEvents.incrementAndGet(); + } + }).build()) + .get(); + + assertThat(unknownEvents.get()).isEqualTo(1); + assertThat(knownEvents.get()).isEqualTo(0); + } + private List subscribeToShard() throws Throwable { try { List events = new ArrayList<>(); @@ -188,14 +222,21 @@ public void request(long l) { byte[] bytes = invokeSafely(() -> IoUtils.toByteArray(c)); subscriber.onNext(ByteBuffer.wrap(bytes)); }); - } finally { + subscriber.onComplete(); cf.complete(null); + } catch (Throwable e) { + subscriber.onError(e); + value.onError(e); + cf.completeExceptionally(e); } } @Override public void cancel() { + RuntimeException e = new RuntimeException(); + subscriber.onError(e); + value.onError(e); } })); return cf; diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index a00eced8f4ed..17a7cd756c7c 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index b6c6fee0d4f9..eed4ea2b8e75 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 736ed8beecc1..15cf8a6d6c08 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 
4.0.0 kinesisvideo diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json index 2e00208f0d61..47e458f03d0d 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json @@ -61,7 +61,8 @@ {"shape":"ClientLimitExceededException"}, {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, - {"shape":"VersionMismatchException"} + {"shape":"VersionMismatchException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

Deletes a specified signaling channel. DeleteSignalingChannel is an asynchronous operation. If you don't specify the channel's current version, the most recent version is deleted.

" }, @@ -78,7 +79,8 @@ {"shape":"InvalidArgumentException"}, {"shape":"ResourceNotFoundException"}, {"shape":"NotAuthorizedException"}, - {"shape":"VersionMismatchException"} + {"shape":"VersionMismatchException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

Deletes a Kinesis video stream and the data contained in the stream.

This method marks the stream for deletion, and makes the data in the stream inaccessible immediately.

To ensure that you have the latest version of the stream before deleting it, you can specify the stream version. Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream API.

This operation requires permission for the KinesisVideo:DeleteStream action.

" }, @@ -96,7 +98,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Returns the most current information about the signaling channel. You must specify either the name or the ARN of the channel that you want to describe.

" + "documentation":"

Returns the most current information about the signaling channel. You must specify either the name or the Amazon Resource Name (ARN) of the channel that you want to describe.

" }, "DescribeStream":{ "name":"DescribeStream", @@ -145,7 +147,7 @@ {"shape":"ResourceInUseException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Provides an endpoint for the specified signaling channel to send and receive messages. This API uses the SingleMasterChannelEndpointConfiguration input parameter, which consists of the Protocols and Role properties.

Protocols is used to determine the communication mechanism. For example, specifying WSS as the protocol, results in this API producing a secure websocket endpoint, and specifying HTTPS as the protocol, results in this API generating an HTTPS endpoint.

Role determines the messaging permissions. A MASTER role results in this API generating an endpoint that a client can use to communicate with any of the viewers on the channel. A VIEWER role results in this API generating an endpoint that a client can use to communicate only with a MASTER.

" + "documentation":"

Provides an endpoint for the specified signaling channel to send and receive messages. This API uses the SingleMasterChannelEndpointConfiguration input parameter, which consists of the Protocols and Role properties.

Protocols is used to determine the communication mechanism. For example, if you specify WSS as the protocol, this API produces a secure websocket endpoint. If you specify HTTPS as the protocol, this API generates an HTTPS endpoint.

Role determines the messaging permissions. A MASTER role results in this API generating an endpoint that a client can use to communicate with any of the viewers on the channel. A VIEWER role results in this API generating an endpoint that a client can use to communicate only with a MASTER.

" }, "ListSignalingChannels":{ "name":"ListSignalingChannels", @@ -311,7 +313,7 @@ {"shape":"AccessDeniedException"}, {"shape":"VersionMismatchException"} ], - "documentation":"

Updates the existing signaling channel. This is an asynchronous operation and takes time to complete.

If the MessageTtlSeconds value is updated (either increased or reduced), then it only applies to new messages sent via this channel after it's been updated. Existing messages are still expire as per the previous MessageTtlSeconds value.

" + "documentation":"

Updates the existing signaling channel. This is an asynchronous operation and takes time to complete.

If the MessageTtlSeconds value is updated (either increased or reduced), it only applies to new messages sent via this channel after it's been updated. Existing messages are still expired as per the previous MessageTtlSeconds value.

" }, "UpdateStream":{ "name":"UpdateStream", @@ -341,7 +343,8 @@ "LIST_FRAGMENTS", "GET_MEDIA_FOR_FRAGMENT_LIST", "GET_HLS_STREAMING_SESSION_URL", - "GET_DASH_STREAMING_SESSION_URL" + "GET_DASH_STREAMING_SESSION_URL", + "GET_CLIP" ] }, "AccessDeniedException":{ @@ -380,7 +383,7 @@ }, "ChannelARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel.

" }, "ChannelType":{ "shape":"ChannelType", @@ -466,7 +469,7 @@ "members":{ "ChannelName":{ "shape":"ChannelName", - "documentation":"

A name for the signaling channel that you are creating. It must be unique for each account and region.

" + "documentation":"

A name for the signaling channel that you are creating. It must be unique for each AWS account and AWS Region.

" }, "ChannelType":{ "shape":"ChannelType", @@ -478,7 +481,7 @@ }, "Tags":{ "shape":"TagOnCreateList", - "documentation":"

A set of tags (key/value pairs) that you want to associate with this channel.

" + "documentation":"

A set of tags (key-value pairs) that you want to associate with this channel.

" } } }, @@ -487,7 +490,7 @@ "members":{ "ChannelARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the created channel.

" + "documentation":"

The Amazon Resource Name (ARN) of the created channel.

" } } }, @@ -545,11 +548,11 @@ "members":{ "ChannelARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel that you want to delete.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel that you want to delete.

" }, "CurrentVersion":{ "shape":"Version", - "documentation":"

The current version of the signaling channel that you want to delete. You can obtain the current version by invoking the DescribeSignalingChannel or ListSignalingChannels APIs.

" + "documentation":"

The current version of the signaling channel that you want to delete. You can obtain the current version by invoking the DescribeSignalingChannel or ListSignalingChannels API operations.

" } } }, @@ -670,7 +673,7 @@ "members":{ "ChannelARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signalling channel for which you want to get an endpoint.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel for which you want to get an endpoint.

" }, "SingleMasterChannelEndpointConfiguration":{ "shape":"SingleMasterChannelEndpointConfiguration", @@ -717,7 +720,8 @@ "KmsKeyId":{ "type":"string", "max":2048, - "min":1 + "min":1, + "pattern":".+" }, "ListOfProtocols":{ "type":"list", @@ -796,11 +800,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

If you specify this parameter and the result of a ListTagsForResource call is truncated, the response includes a token that you can use in the next request to fetch the next batch of tags.

" + "documentation":"

If you specify this parameter and the result of a ListTagsForResource call is truncated, the response includes a token that you can use in the next request to fetch the next batch of tags.

" }, "ResourceARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel for which you want to list tags.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel for which you want to list tags.

" } } }, @@ -809,7 +813,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

If you specify this parameter and the result of a ListTagsForResource call is truncated, the response includes a token that you can use in the next request to fetch the next set of tags.

" + "documentation":"

If you specify this parameter and the result of a ListTagsForResource call is truncated, the response includes a token that you can use in the next request to fetch the next set of tags.

" }, "Tags":{ "shape":"ResourceTags", @@ -903,7 +907,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The stream is currently not available for this operation.

", + "documentation":"

The signaling channel is currently not available for this operation.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -1073,7 +1077,7 @@ "members":{ "ResourceARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel to which you want to add tags.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel to which you want to add tags.

" }, "Tags":{ "shape":"TagList", @@ -1134,7 +1138,7 @@ "members":{ "ResourceARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel from which you want to remove tags.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel from which you want to remove tags.

" }, "TagKeyList":{ "shape":"TagKeyList", @@ -1221,7 +1225,7 @@ "members":{ "ChannelARN":{ "shape":"ResourceARN", - "documentation":"

The ARN of the signaling channel that you want to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the signaling channel that you want to update.

" }, "CurrentVersion":{ "shape":"Version", diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 11e2c15e06c9..6d9091bf7e0b 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json index f6227da05ac3..60f8b7223c38 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json @@ -11,6 +11,27 @@ "uid":"kinesis-video-archived-media-2017-09-30" }, "operations":{ + "GetClip":{ + "name":"GetClip", + "http":{ + "method":"POST", + "requestUri":"/getClip" + }, + "input":{"shape":"GetClipInput"}, + "output":{"shape":"GetClipOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidArgumentException"}, + {"shape":"ClientLimitExceededException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"UnsupportedStreamMediaTypeException"}, + {"shape":"MissingCodecPrivateDataException"}, + {"shape":"InvalidCodecPrivateDataException"}, + {"shape":"InvalidMediaFrameException"}, + {"shape":"NoDataRetentionException"} + ], + "documentation":"

Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

As a prerequisite to using the GetClip API, you must obtain an endpoint using GetDataEndpoint, specifying GET_CLIP for the APIName parameter.

An Amazon Kinesis video stream has the following requirements for providing data through MP4:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.

" + }, "GetDASHStreamingSessionURL":{ "name":"GetDASHStreamingSessionURL", "http":{ @@ -94,6 +115,49 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ClipFragmentSelector":{ + "type":"structure", + "required":[ + "FragmentSelectorType", + "TimestampRange" + ], + "members":{ + "FragmentSelectorType":{ + "shape":"ClipFragmentSelectorType", + "documentation":"

The origin of the timestamps to use (Server or Producer).

" + }, + "TimestampRange":{ + "shape":"ClipTimestampRange", + "documentation":"

The range of timestamps to return.

" + } + }, + "documentation":"

Describes the timestamp range and timestamp origin of a range of fragments.

Fragments that have duplicate producer timestamps are deduplicated. This means that if producers are producing a stream of fragments with producer timestamps that are approximately equal to the true clock time, the clip will contain all of the fragments within the requested timestamp range. If some fragments are ingested within the same time range and very different points in time, only the oldest ingested collection of fragments are returned.

" + }, + "ClipFragmentSelectorType":{ + "type":"string", + "enum":[ + "PRODUCER_TIMESTAMP", + "SERVER_TIMESTAMP" + ] + }, + "ClipTimestampRange":{ + "type":"structure", + "required":[ + "StartTimestamp", + "EndTimestamp" + ], + "members":{ + "StartTimestamp":{ + "shape":"Timestamp", + "documentation":"

The starting timestamp in the range of timestamps for which to return fragments.

This value is inclusive. Fragments that start before the StartTimestamp and continue past it are included in the session. If FragmentSelectorType is SERVER_TIMESTAMP, the StartTimestamp must be later than the stream head.

" + }, + "EndTimestamp":{ + "shape":"Timestamp", + "documentation":"

The end of the timestamp range for the requested media.

This value must be within 3 hours of the specified StartTimestamp, and it must be later than the StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP, this value must be in the past.

This value is inclusive. The EndTimestamp is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp value and continue past it are included in the session.

" + } + }, + "documentation":"

The range of timestamps for which to return fragments.

The values in the ClipTimestampRange are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.

" + }, "ContainerFormat":{ "type":"string", "enum":[ @@ -175,7 +239,7 @@ "type":"structure", "members":{ "FragmentNumber":{ - "shape":"String", + "shape":"FragmentNumberString", "documentation":"

The unique identifier of the fragment. This value monotonically increases based on the ingestion order.

" }, "FragmentSizeInBytes":{ @@ -238,6 +302,40 @@ "SERVER_TIMESTAMP" ] }, + "GetClipInput":{ + "type":"structure", + "required":["ClipFragmentSelector"], + "members":{ + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name of the stream for which to retrieve the media clip.

You must specify either the StreamName or the StreamARN.

" + }, + "StreamARN":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the stream for which to retrieve the media clip.

You must specify either the StreamName or the StreamARN.

" + }, + "ClipFragmentSelector":{ + "shape":"ClipFragmentSelector", + "documentation":"

The time range of the requested clip and the source of the timestamps.

" + } + } + }, + "GetClipOutput":{ + "type":"structure", + "members":{ + "ContentType":{ + "shape":"ContentType", + "documentation":"

The content type of the media in the requested clip.

", + "location":"header", + "locationName":"Content-Type" + }, + "Payload":{ + "shape":"Payload", + "documentation":"

Traditional MP4 file that contains the media clip from the specified video stream. The output will contain the first 100 MB or the first 200 fragments from the specified start timestamp. For more information, see Kinesis Video Streams Limits.

" + } + }, + "payload":"Payload" + }, "GetDASHStreamingSessionURLInput":{ "type":"structure", "members":{ @@ -444,6 +542,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidMediaFrameException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

One or more frames in the requested clip could not be parsed based on the specified codec.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "ListFragmentsInput":{ "type":"structure", "required":["StreamName"], @@ -457,7 +564,7 @@ "documentation":"

The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.

" }, "NextToken":{ - "shape":"String", + "shape":"NextToken", "documentation":"

A token to specify where to start paginating. This is the ListFragmentsOutput$NextToken from a previously truncated response.

" }, "FragmentSelector":{ @@ -474,7 +581,7 @@ "documentation":"

A list of archived Fragment objects from the stream that meet the selector criteria. Results are in no specific order, even across pages.

" }, "NextToken":{ - "shape":"String", + "shape":"NextToken", "documentation":"

If the returned list is truncated, the operation returns this token to use to retrieve the next page of results. This value is null when there are no more results to return.

" } } @@ -489,6 +596,12 @@ "error":{"httpStatusCode":400}, "exception":true }, + "NextToken":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[a-zA-Z0-9+/]+={0,2}" + }, "NoDataRetentionException":{ "type":"structure", "members":{ @@ -537,10 +650,6 @@ "min":1, "pattern":"[a-zA-Z0-9_.-]+" }, - "String":{ - "type":"string", - "min":1 - }, "Timestamp":{"type":"timestamp"}, "TimestampRange":{ "type":"structure", diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 5f666e002de1..6c8d005fa5fb 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 39c33ffc4c60..32359abdcc86 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 99d8e35a77da..60ee871766e5 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/kms/src/main/resources/codegen-resources/service-2.json b/services/kms/src/main/resources/codegen-resources/service-2.json index 54d175e6c4ef..04a21ee03f19 100755 --- a/services/kms/src/main/resources/codegen-resources/service-2.json +++ b/services/kms/src/main/resources/codegen-resources/service-2.json @@ -63,7 +63,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Creates a display name for a customer managed customer master key (CMK). You can use an alias to identify a CMK in cryptographic operations, such as Encrypt and GenerateDataKey. You can change the CMK associated with the alias at any time.

Aliases are easier to remember than key IDs. They can also help to simplify your applications. For example, if you use an alias in your code, you can change the CMK your code uses by associating a given alias with a different CMK.

To run the same code in multiple AWS regions, use an alias in your code, such as alias/ApplicationKey. Then, in each AWS Region, create an alias/ApplicationKey alias that is associated with a CMK in that Region. When you run your code, it uses the alias/ApplicationKey CMK for that AWS Region without any Region-specific code.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

To use aliases successfully, be aware of the following information.

  • Each alias points to only one CMK at a time, although a single CMK can have multiple aliases. The alias and its associated CMK must be in the same AWS account and Region.

  • You can associate an alias with any customer managed CMK in the same AWS account and Region. However, you do not have permission to associate an alias with an AWS managed CMK or an AWS owned CMK.

  • To change the CMK associated with an alias, use the UpdateAlias operation. The current CMK and the new CMK must be the same type (both symmetric or both asymmetric) and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents cryptographic errors in code that uses aliases.

  • The alias name must begin with alias/ followed by a name, such as alias/ExampleAlias. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is reserved for AWS managed CMKs.

  • The alias name must be unique within an AWS Region. However, you can use the same alias name in multiple Regions of the same AWS account. Each instance of the alias is associated with a CMK in its Region.

  • After you create an alias, you cannot change its alias name. However, you can use the DeleteAlias operation to delete the alias and then create a new alias with the desired name.

  • You can use an alias name or alias ARN to identify a CMK in AWS KMS cryptographic operations and in the DescribeKey operation. However, you cannot use alias names or alias ARNs in API operations that manage CMKs, such as DisableKey or GetKeyPolicy. For information about the valid CMK identifiers for each AWS KMS API operation, see the descriptions of the KeyId parameter in the API operation documentation.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases and alias ARNs of CMKs in each AWS account and Region, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Creates a display name for a customer managed customer master key (CMK). You can use an alias to identify a CMK in cryptographic operations, such as Encrypt and GenerateDataKey. You can change the CMK associated with the alias at any time.

Aliases are easier to remember than key IDs. They can also help to simplify your applications. For example, if you use an alias in your code, you can change the CMK your code uses by associating a given alias with a different CMK.

To run the same code in multiple AWS regions, use an alias in your code, such as alias/ApplicationKey. Then, in each AWS Region, create an alias/ApplicationKey alias that is associated with a CMK in that Region. When you run your code, it uses the alias/ApplicationKey CMK for that AWS Region without any Region-specific code.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.

To use aliases successfully, be aware of the following information.

  • Each alias points to only one CMK at a time, although a single CMK can have multiple aliases. The alias and its associated CMK must be in the same AWS account and Region.

  • You can associate an alias with any customer managed CMK in the same AWS account and Region. However, you do not have permission to associate an alias with an AWS managed CMK or an AWS owned CMK.

  • To change the CMK associated with an alias, use the UpdateAlias operation. The current CMK and the new CMK must be the same type (both symmetric or both asymmetric) and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents cryptographic errors in code that uses aliases.

  • The alias name must begin with alias/ followed by a name, such as alias/ExampleAlias. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is reserved for AWS managed CMKs.

  • The alias name must be unique within an AWS Region. However, you can use the same alias name in multiple Regions of the same AWS account. Each instance of the alias is associated with a CMK in its Region.

  • After you create an alias, you cannot change its alias name. However, you can use the DeleteAlias operation to delete the alias and then create a new alias with the desired name.

  • You can use an alias name or alias ARN to identify a CMK in AWS KMS cryptographic operations and in the DescribeKey operation. However, you cannot use alias names or alias ARNs in API operations that manage CMKs, such as DisableKey or GetKeyPolicy. For information about the valid CMK identifiers for each AWS KMS API operation, see the descriptions of the KeyId parameter in the API operation documentation.

Because an alias is not a property of a CMK, you can delete and change the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases and alias ARNs of CMKs in each AWS account and Region, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "CreateCustomKeyStore":{ "name":"CreateCustomKeyStore", @@ -102,7 +102,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Adds a grant to a customer master key (CMK). The grant allows the grantee principal to use the CMK when the conditions specified in the grant are met. When setting permissions, grants are an alternative to key policies.

To create a grant that allows a cryptographic operation only when the request includes a particular encryption context, use the Constraints parameter. For details, see GrantConstraints.

You can create grants on symmetric and asymmetric CMKs. However, if the grant allows an operation that the CMK does not support, CreateGrant fails with a ValidationException.

  • Grants for symmetric CMKs cannot allow operations that are not supported for symmetric CMKs, including Sign, Verify, and GetPublicKey. (There are limited exceptions to this rule for legacy operations, but you should not create a grant for an operation that AWS KMS does not support.)

  • Grants for asymmetric CMKs cannot allow operations that are not supported for asymmetric CMKs, including operations that generate data keys or data key pairs, or operations related to automatic key rotation, imported key material, or CMKs in custom key stores.

  • Grants for asymmetric CMKs with a KeyUsage of ENCRYPT_DECRYPT cannot allow the Sign or Verify operations. Grants for asymmetric CMKs with a KeyUsage of SIGN_VERIFY cannot allow the Encrypt or Decrypt operations.

  • Grants for asymmetric CMKs cannot include an encryption context grant constraint. An encryption context is not supported on asymmetric CMKs.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Adds a grant to a customer master key (CMK). The grant allows the grantee principal to use the CMK when the conditions specified in the grant are met. When setting permissions, grants are an alternative to key policies.

To create a grant that allows a cryptographic operation only when the request includes a particular encryption context, use the Constraints parameter. For details, see GrantConstraints.

You can create grants on symmetric and asymmetric CMKs. However, if the grant allows an operation that the CMK does not support, CreateGrant fails with a ValidationException.

  • Grants for symmetric CMKs cannot allow operations that are not supported for symmetric CMKs, including Sign, Verify, and GetPublicKey. (There are limited exceptions to this rule for legacy operations, but you should not create a grant for an operation that AWS KMS does not support.)

  • Grants for asymmetric CMKs cannot allow operations that are not supported for asymmetric CMKs, including operations that generate data keys or data key pairs, or operations related to automatic key rotation, imported key material, or CMKs in custom key stores.

  • Grants for asymmetric CMKs with a KeyUsage of ENCRYPT_DECRYPT cannot allow the Sign or Verify operations. Grants for asymmetric CMKs with a KeyUsage of SIGN_VERIFY cannot allow the Encrypt or Decrypt operations.

  • Grants for asymmetric CMKs cannot include an encryption context grant constraint. An encryption context is not supported on asymmetric CMKs.

For information about symmetric and asymmetric CMKs, see Using Symmetric and Asymmetric CMKs in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter. For more information about grants, see Grants in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "CreateKey":{ "name":"CreateKey", @@ -177,7 +177,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Deletes a custom key store. This operation does not delete the AWS CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any AWS KMS customer master keys (CMKs). Before deleting the key store, verify that you will never need to use any of the CMKs in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to disconnect the key store from AWS KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from AWS KMS. While the key store is disconnected, you cannot create or use the CMKs in the key store. But, you do not need to delete CMKs and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" + "documentation":"

Deletes a custom key store. This operation does not delete the AWS CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any AWS KMS customer master keys (CMKs). Before deleting the key store, verify that you will never need to use any of the CMKs in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to disconnect the key store from AWS KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from AWS KMS. While the key store is disconnected, you cannot create or use the CMKs in the key store. But, you do not need to delete CMKs and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" }, "DeleteImportedKeyMaterial":{ "name":"DeleteImportedKeyMaterial", @@ -240,7 +240,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Sets the state of a customer master key (CMK) to disabled, thereby preventing its use for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide .

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "DisableKeyRotation":{ "name":"DisableKeyRotation", @@ -273,7 +273,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

Disconnects the custom key store from its associated AWS CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its customer master keys (CMKs), but you cannot create or use CMKs in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create customer master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" + "documentation":"

Disconnects the custom key store from its associated AWS CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its customer master keys (CMKs), but you cannot create or use CMKs in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create customer master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in AWS KMS, which combines the convenience and extensive integration of AWS KMS with the isolation and control of a single-tenant key store.

" }, "EnableKey":{ "name":"EnableKey", @@ -290,7 +290,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Sets the key state of a customer master key (CMK) to enabled. This allows you to use the CMK for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Sets the key state of a customer master key (CMK) to enabled. This allows you to use the CMK for cryptographic operations. You cannot perform this operation on a CMK in a different AWS account.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "EnableKeyRotation":{ "name":"EnableKeyRotation", @@ -328,7 +328,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

  • You can encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information.

  • You can use the Encrypt operation to move encrypted data from one AWS region to another. In the first region, generate a data key and use the plaintext key to encrypt the data. Then, in the new region, call the Encrypt method on same plaintext data key. Now, you can safely move the encrypted data and encrypted data key to the new region, and decrypt in the new region when necessary.

You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

When you encrypt data, you must specify a symmetric or asymmetric CMK to use in the encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. To find the KeyUsage of a CMK, use the DescribeKey operation.

If you use a symmetric CMK, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

If you specify an asymmetric CMK, you must also specify the encryption algorithm. The algorithm must be compatible with the CMK type.

When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of CMK and the encryption algorithm that you choose.

  • Symmetric CMKs

    • SYMMETRIC_DEFAULT: 4096 bytes

  • RSA_2048

    • RSAES_OAEP_SHA_1: 214 bytes

    • RSAES_OAEP_SHA_256: 190 bytes

  • RSA_3072

    • RSAES_OAEP_SHA_1: 342 bytes

    • RSAES_OAEP_SHA_256: 318 bytes

  • RSA_4096

    • RSAES_OAEP_SHA_1: 470 bytes

    • RSAES_OAEP_SHA_256: 446 bytes

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

" + "documentation":"

Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases:

  • You can encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information.

  • You can use the Encrypt operation to move encrypted data from one AWS Region to another. For example, in Region A, generate a data key and use the plaintext key to encrypt your data. Then, in Region A, use the Encrypt operation to encrypt the plaintext data key under a CMK in Region B. Now, you can move the encrypted data and the encrypted data key to Region B. When necessary, you can decrypt the encrypted data key and the encrypted data entirely within Region B.

You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

When you encrypt data, you must specify a symmetric or asymmetric CMK to use in the encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. To find the KeyUsage of a CMK, use the DescribeKey operation.

If you use a symmetric CMK, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

If you specify an asymmetric CMK, you must also specify the encryption algorithm. The algorithm must be compatible with the CMK type.

When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of CMK and the encryption algorithm that you choose.

  • Symmetric CMKs

    • SYMMETRIC_DEFAULT: 4096 bytes

  • RSA_2048

    • RSAES_OAEP_SHA_1: 214 bytes

    • RSAES_OAEP_SHA_256: 190 bytes

  • RSA_3072

    • RSAES_OAEP_SHA_1: 342 bytes

    • RSAES_OAEP_SHA_256: 318 bytes

  • RSA_4096

    • RSAES_OAEP_SHA_1: 470 bytes

    • RSAES_OAEP_SHA_256: 446 bytes

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter.

" }, "GenerateDataKey":{ "name":"GenerateDataKey", @@ -348,7 +348,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the data key.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys. To get the type of your CMK, use the DescribeKey operation.

You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

If the operation succeeds, the plaintext copy of the data key is in the Plaintext field of the response, and the encrypted copy of the data key in the CiphertextBlob field.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

We recommend that you use the following pattern to encrypt data locally in your application:

  1. Use the GenerateDataKey operation to get a data encryption key.

  2. Use the plaintext data key (returned in the Plaintext field of the response) to encrypt data locally, then erase the plaintext data key from memory.

  3. Store the encrypted data key (returned in the CiphertextBlob field of the response) alongside the locally encrypted data.

To decrypt data locally:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data locally, then erase the plaintext data key from memory.

" + "documentation":"

Generates a unique symmetric data key for client-side encryption. This operation returns a plaintext copy of the data key and a copy that is encrypted under a customer master key (CMK) that you specify. You can use the plaintext key to encrypt your data outside of AWS KMS and store the encrypted data key with the encrypted data.

GenerateDataKey returns a unique data key for each request. The bytes in the plaintext key are not related to the caller or the CMK.

To generate a data key, specify the symmetric CMK that will be used to encrypt the data key. You cannot use an asymmetric CMK to generate data keys. To get the type of your CMK, use the DescribeKey operation. You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

How to use your data key

We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the AWS Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

To encrypt data outside of AWS KMS:

  1. Use the GenerateDataKey operation to get a data key.

  2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of AWS KMS. Then erase the plaintext data key from memory.

  3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

To decrypt data outside of AWS KMS:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data outside of AWS KMS, then erase the plaintext data key from memory.

" }, "GenerateDataKeyPair":{ "name":"GenerateDataKeyPair", @@ -366,9 +366,10 @@ {"shape":"InvalidKeyUsageException"}, {"shape":"InvalidGrantTokenException"}, {"shape":"KMSInternalException"}, - {"shape":"KMSInvalidStateException"} + {"shape":"KMSInvalidStateException"}, + {"shape":"UnsupportedOperationException"} ], - "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPair operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric CMK you specify. You can use the data key pair to perform asymmetric cryptography outside of AWS KMS.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are not related to the caller or the CMK that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in a data key pair. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPair operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric CMK you specify. You can use the data key pair to perform asymmetric cryptography outside of AWS KMS.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are not related to the caller or the CMK that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in a data key pair. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateDataKeyPairWithoutPlaintext":{ "name":"GenerateDataKeyPairWithoutPlaintext", @@ -386,9 +387,10 @@ {"shape":"InvalidKeyUsageException"}, {"shape":"InvalidGrantTokenException"}, {"shape":"KMSInternalException"}, - {"shape":"KMSInvalidStateException"} + {"shape":"KMSInvalidStateException"}, + {"shape":"UnsupportedOperationException"} ], - "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPairWithoutPlaintext operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric CMK you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in the data key pair. You cannot use an asymmetric CMK. To get the type of your CMK, use the KeySpec field in the DescribeKey response.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the private key.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique asymmetric data key pair. The GenerateDataKeyPairWithoutPlaintext operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric CMK you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key.

To generate a data key pair, you must specify a symmetric customer master key (CMK) to encrypt the private key in the data key pair. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the KeySpec field in the DescribeKey response.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of AWS KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or CMK that is used to encrypt the private key.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateDataKeyWithoutPlaintext":{ "name":"GenerateDataKeyWithoutPlaintext", @@ -408,7 +410,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the private key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Generates a unique symmetric data key. This operation returns a data key that is encrypted under a customer master key (CMK) that you specify. To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it returns only the encrypted copy of the data key. This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

GenerateDataKeyWithoutPlaintext returns a unique data key for each request. The bytes in the keys are not related to the caller or CMK that is used to encrypt the data key.

To generate a data key, you must specify the symmetric customer master key (CMK) that is used to encrypt the data key. You cannot use an asymmetric CMK to generate a data key. To get the type of your CMK, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use the optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "GenerateRandom":{ "name":"GenerateRandom", @@ -556,7 +558,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

" + "documentation":"

Gets a list of all grants for the specified customer master key (CMK).

To perform this operation on a CMK in a different AWS account, specify the key ARN in the value of the KeyId parameter.

The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an AWS service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

" }, "ListKeyPolicies":{ "name":"ListKeyPolicies", @@ -662,7 +664,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

Decrypts ciphertext and then reencrypts it entirely within AWS KMS. You can use this operation to change the customer master key (CMK) under which data is encrypted, such as when you manually rotate a CMK or change the CMK that protects a ciphertext. You can also use it to reencrypt ciphertext under the same CMK, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the AWS Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with AWS KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

  • If your ciphertext was encrypted under an asymmetric CMK, you must identify the source CMK, that is, the CMK that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

  • It is optional, but you can specify a source CMK even when the ciphertext was encrypted under a symmetric CMK. This ensures that the ciphertext is decrypted only by using a particular CMK. If the CMK that you specify cannot decrypt the ciphertext, the ReEncrypt operation fails.

  • To reencrypt the data, you must specify the destination CMK, that is, the CMK that re-encrypts the data after it is decrypted. You can select a symmetric or asymmetric CMK. If the destination CMK is an asymmetric CMK, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the CMK.

    When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

    You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions:

  • kms:EncryptFrom permission on the source CMK

  • kms:EncryptTo permission on the destination CMK

To permit reencryption from

or to a CMK, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a CMK. But you must include it manually when you create a CMK programmatically or when you use the PutKeyPolicy operation set a key policy.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

Decrypts ciphertext and then reencrypts it entirely within AWS KMS. You can use this operation to change the customer master key (CMK) under which data is encrypted, such as when you manually rotate a CMK or change the CMK that protects a ciphertext. You can also use it to reencrypt ciphertext under the same CMK, such as to change the encryption context of a ciphertext.

The ReEncrypt operation can decrypt ciphertext that was encrypted by using an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the AWS Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with AWS KMS.

When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

  • If your ciphertext was encrypted under an asymmetric CMK, you must identify the source CMK, that is, the CMK that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

  • It is optional, but you can specify a source CMK even when the ciphertext was encrypted under a symmetric CMK. This ensures that the ciphertext is decrypted only by using a particular CMK. If the CMK that you specify cannot decrypt the ciphertext, the ReEncrypt operation fails.

  • To reencrypt the data, you must specify the destination CMK, that is, the CMK that re-encrypts the data after it is decrypted. You can select a symmetric or asymmetric CMK. If the destination CMK is an asymmetric CMK, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the CMK.

    When you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and encryption algorithm that you choose. You will be required to provide the same CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

    You are not required to supply the CMK ID and encryption algorithm when you decrypt with symmetric CMKs because AWS KMS stores this information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions:

  • kms:ReEncryptFrom permission on the source CMK

  • kms:ReEncryptTo permission on the destination CMK

To permit reencryption from or to a CMK, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a CMK. But you must include it manually when you create a CMK programmatically or when you use the PutKeyPolicy operation to set a key policy.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" }, "RetireGrant":{ "name":"RetireGrant", @@ -780,6 +782,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"NotFoundException"}, {"shape":"KMSInternalException"}, + {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], "documentation":"

Associates an existing AWS KMS alias with a different customer master key (CMK). Each alias is associated with only one CMK at a time, although a CMK can have multiple aliases. The alias and the CMK must be in the same AWS account and region. You cannot perform this operation on an alias in a different AWS account.

The current and new CMK must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of CMK, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

Because an alias is not a property of a CMK, you can create, update, and delete the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs in the account, use the ListAliases operation.

The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" @@ -909,7 +912,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier of the master key for which deletion is canceled.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK whose deletion is canceled.

" } } }, @@ -1081,7 +1084,7 @@ }, "Constraints":{ "shape":"GrantConstraints", - "documentation":"

Allows a cryptographic operation only when the encryption context matches or includes the encryption context specified in this structure. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide .

" + "documentation":"

Allows a cryptographic operation only when the encryption context matches or includes the encryption context specified in this structure. For more information about encryption context, see Encryption Context in the AWS Key Management Service Developer Guide .

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1119,7 +1122,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

Determines the cryptographic operations for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric CMKs. You can't change the KeyUsage value after the CMK is created.

Select only one valid value.

  • For symmetric CMKs, omit the parameter or specify ENCRYPT_DECRYPT.

  • For asymmetric CMKs with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

  • For asymmetric CMKs with ECC key material, specify SIGN_VERIFY.

" + "documentation":"

Determines the cryptographic operations for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric CMKs. You can't change the KeyUsage value after the CMK is created.

Select only one valid value.

  • For symmetric CMKs, omit the parameter or specify ENCRYPT_DECRYPT.

  • For asymmetric CMKs with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

  • For asymmetric CMKs with ECC key material, specify SIGN_VERIFY.

" }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -1223,7 +1226,7 @@ }, "ConnectionErrorCode":{ "shape":"ConnectionErrorCodeType", - "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in AWS Key Management Service Developer Guide.

Valid values are:

  • CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with the specified cluster ID.

  • INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS CloudHSM cluster does not contain any active HSMs. To connect a custom key store to its AWS CloudHSM cluster, the cluster must contain at least one active HSM.

  • INTERNAL_ERROR - AWS KMS could not complete the request due to an internal error. Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key store before trying to connect again.

  • INVALID_CREDENTIALS - AWS KMS does not have the correct password for the kmsuser crypto user in the AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting to the custom key store.

  • SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration was deleted. If AWS KMS cannot find all of the subnets that were configured for the cluster when the custom key store was created, attempts to connect fail. To fix this error, create a cluster from a backup and associate it with your custom key store. This process includes selecting a VPC and subnets. For details, see How to Fix a Connection Failure in the AWS Key Management Service Developer Guide.

  • USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated AWS CloudHSM cluster due to too many failed password attempts. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • USER_LOGGED_IN - The kmsuser CU account is logged into the the associated AWS CloudHSM cluster. This prevents AWS KMS from rotating the kmsuser account password and logging into the cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you changed the kmsuser password to log into the cluster, you must also and update the key store password value for the custom key store. For help, see How to Log Out and Reconnect in the AWS Key Management Service Developer Guide.

  • USER_NOT_FOUND - AWS KMS cannot find a kmsuser CU account in the associated AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must create a kmsuser CU account in the cluster, and then update the key store password value for the custom key store.

" + "documentation":"

Describes the connection error. This field appears in the response only when the ConnectionState is FAILED. For help resolving these errors, see How to Fix a Connection Failure in AWS Key Management Service Developer Guide.

Valid values are:

  • CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with the specified cluster ID.

  • INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS CloudHSM cluster does not contain any active HSMs. To connect a custom key store to its AWS CloudHSM cluster, the cluster must contain at least one active HSM.

  • INTERNAL_ERROR - AWS KMS could not complete the request due to an internal error. Retry the request. For ConnectCustomKeyStore requests, disconnect the custom key store before trying to connect again.

  • INVALID_CREDENTIALS - AWS KMS does not have the correct password for the kmsuser crypto user in the AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting to the custom key store.

  • SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration was deleted. If AWS KMS cannot find all of the subnets in the cluster configuration, attempts to connect the custom key store to the AWS CloudHSM cluster fail. To fix this error, create a cluster from a recent backup and associate it with your custom key store. (This process creates a new cluster configuration with a VPC and private subnets.) For details, see How to Fix a Connection Failure in the AWS Key Management Service Developer Guide.

  • USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated AWS CloudHSM cluster due to too many failed password attempts. Before you can connect your custom key store to its AWS CloudHSM cluster, you must change the kmsuser account password and update the key store password value for the custom key store.

  • USER_LOGGED_IN - The kmsuser CU account is logged into the associated AWS CloudHSM cluster. This prevents AWS KMS from rotating the kmsuser account password and logging into the cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you changed the kmsuser password to log into the cluster, you must also update the key store password value for the custom key store. For help, see How to Log Out and Reconnect in the AWS Key Management Service Developer Guide.

  • USER_NOT_FOUND - AWS KMS cannot find a kmsuser CU account in the associated AWS CloudHSM cluster. Before you can connect your custom key store to its AWS CloudHSM cluster, you must create a kmsuser CU account in the cluster, and then update the key store password value for the custom key store.

" }, "CreationDate":{ "shape":"DateType", @@ -1275,7 +1278,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" + "documentation":"

Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1296,7 +1299,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The ARN of the customer master key that was used to perform the decryption.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to decrypt the ciphertext.

" }, "Plaintext":{ "shape":"PlaintextType", @@ -1498,7 +1501,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" + "documentation":"

Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.

An encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.

For more information, see Encryption Context in the AWS Key Management Service Developer Guide.

" }, "GrantTokens":{ "shape":"GrantTokenList", @@ -1519,7 +1522,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The ID of the key used during encryption.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to encrypt the plaintext.

" }, "EncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -1575,7 +1578,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the symmetric CMK that encrypts the private key in the data key pair. You cannot specify an asymmetric CMKs.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the symmetric CMK that encrypts the private key in the data key pair. You cannot specify an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1604,7 +1607,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the private key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the private key.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1625,7 +1628,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

Specifies the CMK that encrypts the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK or a CMK in a custom key store. To get the type and origin of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1650,7 +1653,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Specifies the CMK that encrypted the private key in the data key pair. You must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the type of your CMK, use the DescribeKey operation.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\".

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

  • Alias name: alias/ExampleAlias

  • Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias

To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the private key.

" }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", @@ -1697,7 +1700,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the data key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the data key.

" } } }, @@ -1736,7 +1739,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK that encrypted the data key.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that encrypted the data key.

" } } }, @@ -1834,7 +1837,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the CMK to use in a subsequent ImportKeyMaterial request. This is the same CMK specified in the GetParametersForImport request.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK to use in a subsequent ImportKeyMaterial request. This is the same CMK specified in the GetParametersForImport request.

" }, "ImportToken":{ "shape":"CiphertextType", @@ -1869,7 +1872,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The identifier of the asymmetric CMK from which the public key was downloaded.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK from which the public key was downloaded.

" }, "PublicKey":{ "shape":"PublicKeyType", @@ -1898,14 +1901,14 @@ "members":{ "EncryptionContextSubset":{ "shape":"EncryptionContextType", - "documentation":"

A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

" + "documentation":"

A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

" }, "EncryptionContextEquals":{ "shape":"EncryptionContextType", - "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" + "documentation":"

A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

" } }, - "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

AWS KMS applies the grant constraints only when the grant allows a cryptographic operation that accepts an encryption context as input, such as the following.

AWS KMS does not apply the grant constraints to other operations, such as DescribeKey or ScheduleKeyDeletion.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the AWS Key Management Service Developer Guide .

" + "documentation":"

Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

AWS KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric CMK. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric CMKs and management operations, such as DescribeKey or ScheduleKeyDeletion.

In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the AWS Key Management Service Developer Guide .

" }, "GrantIdType":{ "type":"string", @@ -1937,7 +1940,7 @@ }, "GranteePrincipal":{ "shape":"PrincipalIdType", - "documentation":"

The principal that receives the grant's permissions.

" + "documentation":"

The identity that gets the permissions in the grant.

The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an AWS service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

" }, "RetiringPrincipal":{ "shape":"PrincipalIdType", @@ -1956,7 +1959,7 @@ "documentation":"

A list of key-value pairs that must be present in the encryption context of certain subsequent operations that the grant allows.

" } }, - "documentation":"

Contains information about an entry in a list of grants.

" + "documentation":"

Contains information about a grant.

" }, "GrantNameType":{ "type":"string", @@ -2206,11 +2209,11 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

The cryptographic operations for which you can use the CMK.

" + "documentation":"

The cryptographic operations for which you can use the CMK.

" }, "KeyState":{ "shape":"KeyState", - "documentation":"

The state of the CMK.

For more information about how key state affects the use of a CMK, see How Key State Affects the Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

" + "documentation":"

The current status of the CMK.

For more information about how key state affects the use of a CMK, see Key state: Effect on your CMK in the AWS Key Management Service Developer Guide.

" }, "DeletionDate":{ "shape":"DateType", @@ -2246,11 +2249,11 @@ }, "EncryptionAlgorithms":{ "shape":"EncryptionAlgorithmSpecList", - "documentation":"

A list of encryption algorithms that the CMK supports. You cannot use the CMK with other encryption algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT.

" + "documentation":"

The encryption algorithms that the CMK supports. You cannot use the CMK with other encryption algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT.

" }, "SigningAlgorithms":{ "shape":"SigningAlgorithmSpecList", - "documentation":"

A list of signing algorithms that the CMK supports. You cannot use the CMK with other signing algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is SIGN_VERIFY.

" + "documentation":"

The signing algorithms that the CMK supports. You cannot use the CMK with other signing algorithms within AWS KMS.

This field appears only when the KeyUsage of the CMK is SIGN_VERIFY.

" } }, "documentation":"

Contains metadata about a customer master key (CMK).

This data type is used as a response element for the CreateKey and DescribeKey operations.

" @@ -2647,7 +2650,7 @@ }, "KeyId":{ "shape":"KeyIdType", - "documentation":"

Unique identifier of the CMK used to reencrypt the data.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK that was used to reencrypt the data.

" }, "SourceEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -2712,7 +2715,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier of the customer master key (CMK) for which deletion is scheduled.

" + "documentation":"

The Amazon Resource Name (key ARN) of the CMK whose deletion is scheduled.

" }, "DeletionDate":{ "shape":"DateType", @@ -2755,7 +2758,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The Amazon Resource Name (ARN) of the asymmetric CMK that was used to sign the message.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK that was used to sign the message.

" }, "Signature":{ "shape":"CiphertextType", @@ -2977,7 +2980,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

The unique identifier for the asymmetric CMK that was used to verify the signature.

" + "documentation":"

The Amazon Resource Name (key ARN) of the asymmetric CMK that was used to verify the signature.

" }, "SignatureValid":{ "shape":"BooleanType", diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index 1471d1ecf4da..2483c721b45d 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lakeformation/src/main/resources/codegen-resources/service-2.json b/services/lakeformation/src/main/resources/codegen-resources/service-2.json index 428af9390db5..1d12ef791695 100644 --- a/services/lakeformation/src/main/resources/codegen-resources/service-2.json +++ b/services/lakeformation/src/main/resources/codegen-resources/service-2.json @@ -86,7 +86,7 @@ {"shape":"InvalidInputException"}, {"shape":"EntityNotFoundException"} ], - "documentation":"

The AWS Lake Formation principal.

" + "documentation":"

Retrieves the list of the data lake administrators of a Lake Formation-managed data lake.

" }, "GetEffectivePermissionsForPath":{ "name":"GetEffectivePermissionsForPath", @@ -102,7 +102,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Returns the permissions for a specified table or database resource located at a path in Amazon S3.

" + "documentation":"

Returns the Lake Formation permissions for a specified table or database resource located at a path in Amazon S3. GetEffectivePermissionsForPath will not return databases and tables if the catalog is encrypted.

" }, "GrantPermissions":{ "name":"GrantPermissions", @@ -117,7 +117,7 @@ {"shape":"EntityNotFoundException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

For information about permissions, see Security and Access Control to Metadata and Data.

" + "documentation":"

Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

For information about permissions, see Security and Access Control to Metadata and Data.

" }, "ListPermissions":{ "name":"ListPermissions", @@ -132,7 +132,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Returns a list of the principal permissions on the resource, filtered by the permissions of the caller. For example, if you are granted an ALTER permission, you are able to see only the principal permissions for ALTER.

This operation returns only those permissions that have been explicitly granted.

For information about permissions, see Security and Access Control to Metadata and Data.

" + "documentation":"

Returns a list of the principal permissions on the resource, filtered by the permissions of the caller. For example, if you are granted an ALTER permission, you are able to see only the principal permissions for ALTER.

This operation returns only those permissions that have been explicitly granted.

For information about permissions, see Security and Access Control to Metadata and Data.

" }, "ListResources":{ "name":"ListResources", @@ -161,7 +161,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidInputException"} ], - "documentation":"

The AWS Lake Formation principal.

" + "documentation":"

Sets the list of data lake administrators who have admin privileges on all resources managed by Lake Formation. For more information on admin privileges, see Granting Lake Formation Permissions.

This API replaces the current list of data lake admins with the new list being passed. To add an admin, fetch the current list and add the new admin to that list and pass that list in this API.

" }, "RegisterResource":{ "name":"RegisterResource", @@ -177,7 +177,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"AlreadyExistsException"} ], - "documentation":"

Registers the resource as managed by the Data Catalog.

To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

" + "documentation":"

Registers the resource as managed by the Data Catalog.

To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

The following request registers a new location and gives AWS Lake Formation permission to use the service-linked role to access that location.

ResourceArn = arn:aws:s3:::my-bucket UseServiceLinkedRole = true

If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn:

arn:aws:iam::12345:role/my-data-access-role

" }, "RevokePermissions":{ "name":"RevokePermissions", @@ -379,7 +379,7 @@ "documentation":"

An identifier for the AWS Lake Formation principal.

" } }, - "documentation":"

The AWS Lake Formation principal.

" + "documentation":"

The AWS Lake Formation principal. Supported principals are IAM users or IAM roles.

" }, "DataLakePrincipalList":{ "type":"list", @@ -406,23 +406,31 @@ "members":{ "DataLakeAdmins":{ "shape":"DataLakePrincipalList", - "documentation":"

A list of AWS Lake Formation principals.

" + "documentation":"

A list of AWS Lake Formation principals. Supported principals are IAM users or IAM roles.

" }, "CreateDatabaseDefaultPermissions":{ "shape":"PrincipalPermissionsList", - "documentation":"

A list of up to three principal permissions entries for default create database permissions.

" + "documentation":"

A structure representing a list of up to three principal permissions entries for default create database permissions.

" }, "CreateTableDefaultPermissions":{ "shape":"PrincipalPermissionsList", - "documentation":"

A list of up to three principal permissions entries for default create table permissions.

" + "documentation":"

A structure representing a list of up to three principal permissions entries for default create table permissions.

" + }, + "TrustedResourceOwners":{ + "shape":"TrustedResourceOwners", + "documentation":"

A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's AWS CloudTrail log.

You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

" } }, - "documentation":"

The AWS Lake Formation principal.

" + "documentation":"

A structure representing a list of AWS Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

" }, "DataLocationResource":{ "type":"structure", "required":["ResourceArn"], "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The identifier for the Data Catalog where the location is registered with AWS Lake Formation. By default, it is the account ID of the caller.

" + }, "ResourceArn":{ "shape":"ResourceArnString", "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the data location resource.

" @@ -434,6 +442,10 @@ "type":"structure", "required":["Name"], "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The identifier for the Data Catalog. By default, it is the account ID of the caller.

" + }, "Name":{ "shape":"NameString", "documentation":"

The name of the database resource. Unique to the Data Catalog.

" @@ -552,7 +564,7 @@ "members":{ "DataLakeSettings":{ "shape":"DataLakeSettings", - "documentation":"

A list of AWS Lake Formation principals.

" + "documentation":"

A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

" } } }, @@ -768,6 +780,7 @@ "DROP", "DELETE", "INSERT", + "DESCRIBE", "CREATE_DATABASE", "CREATE_TABLE", "DATA_LOCATION_ACCESS" @@ -831,7 +844,7 @@ }, "DataLakeSettings":{ "shape":"DataLakeSettings", - "documentation":"

A list of AWS Lake Formation principals.

" + "documentation":"

A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

" } } }, @@ -850,11 +863,11 @@ }, "UseServiceLinkedRole":{ "shape":"NullableBoolean", - "documentation":"

Designates a trusted caller, an IAM principal, by registering this caller with the Data Catalog.

" + "documentation":"

Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

For more information, see Using Service-Linked Roles for Lake Formation.

" }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The identifier for the role.

" + "documentation":"

The identifier for the role that registers the resource.

" } } }, @@ -934,7 +947,7 @@ }, "Permissions":{ "shape":"PermissionList", - "documentation":"

The permissions revoked to the principal on the resource. For information about permissions, see Security and Access Control to Metadata and Data.

" + "documentation":"

The permissions revoked to the principal on the resource. For information about permissions, see Security and Access Control to Metadata and Data.

" }, "PermissionsWithGrantOption":{ "shape":"PermissionList", @@ -954,11 +967,12 @@ }, "TableResource":{ "type":"structure", - "required":[ - "DatabaseName", - "Name" - ], + "required":["DatabaseName"], "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The identifier for the Data Catalog. By default, it is the account ID of the caller.

" + }, "DatabaseName":{ "shape":"NameString", "documentation":"

The name of the database for the table. Unique to a Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

" @@ -966,13 +980,31 @@ "Name":{ "shape":"NameString", "documentation":"

The name of the table.

" + }, + "TableWildcard":{ + "shape":"TableWildcard", + "documentation":"

A wildcard object representing every table under a database.

At least one of TableResource$Name or TableResource$TableWildcard is required.

" } }, "documentation":"

A structure for the table object. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

" }, + "TableWildcard":{ + "type":"structure", + "members":{ + }, + "documentation":"

A wildcard object representing every table under a database.

" + }, "TableWithColumnsResource":{ "type":"structure", + "required":[ + "DatabaseName", + "Name" + ], "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The identifier for the Data Catalog. By default, it is the account ID of the caller.

" + }, "DatabaseName":{ "shape":"NameString", "documentation":"

The name of the database for the table with columns resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

" @@ -993,6 +1025,10 @@ "documentation":"

A structure for a table with columns object. This object is only used when granting a SELECT permission.

This object must take a value for at least one of ColumnsNames, ColumnsIndexes, or ColumnsWildcard.

" }, "Token":{"type":"string"}, + "TrustedResourceOwners":{ + "type":"list", + "member":{"shape":"CatalogIdString"} + }, "UpdateResourceRequest":{ "type":"structure", "required":[ diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 6afd8a3e763f..1efe15e74328 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index d68c06d2f490..73b9d23ac0ef 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -442,6 +442,10 @@ {"shape":"EC2UnexpectedException"}, {"shape":"SubnetIPAddressLimitReachedException"}, {"shape":"ENILimitReachedException"}, + {"shape":"EFSMountConnectivityException"}, + {"shape":"EFSMountFailureException"}, + {"shape":"EFSMountTimeoutException"}, + {"shape":"EFSIOException"}, {"shape":"EC2ThrottledException"}, {"shape":"EC2AccessDeniedException"}, {"shape":"InvalidSubnetIDException"}, @@ -455,7 +459,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ResourceNotReadyException"} ], - "documentation":"

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action.

" + "documentation":"

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. To invoke a function asynchronously, set InvocationType to Event.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Retry Behavior.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, limit errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if executing the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might be disconnected during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action.

" }, "InvokeAsync":{ "name":"InvokeAsync", @@ -697,7 +701,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use PutFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

" + "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

" }, "PutProvisionedConcurrencyConfig":{ "name":"PutProvisionedConcurrencyConfig", @@ -1100,7 +1104,7 @@ "members":{ "AdditionalVersionWeights":{ "shape":"AdditionalVersionWeights", - "documentation":"

The name of the second alias, and the percentage of traffic that's routed to it.

" + "documentation":"

The second version, and the percentage of traffic that's routed to it.

" } }, "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" @@ -1179,7 +1183,7 @@ }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" + "documentation":"

The routing configuration of the alias.

" } } }, @@ -1313,6 +1317,10 @@ "Layers":{ "shape":"LayerList", "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } } }, @@ -1506,6 +1514,46 @@ "error":{"httpStatusCode":502}, "exception":true }, + "EFSIOException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

An error occurred when reading from or writing to a connected file system.

", + "error":{"httpStatusCode":410}, + "exception":true + }, + "EFSMountConnectivityException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function couldn't make a network connection to the configured file system.

", + "error":{"httpStatusCode":408}, + "exception":true + }, + "EFSMountFailureException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function couldn't mount the configured file system due to a permission or configuration issue.

", + "error":{"httpStatusCode":403}, + "exception":true + }, + "EFSMountTimeoutException":{ + "type":"structure", + "members":{ + "Type":{"shape":"String"}, + "Message":{"shape":"String"} + }, + "documentation":"

The function was able to make a network connection to the configured file system, but the mount operation timed out.

", + "error":{"httpStatusCode":408}, + "exception":true + }, "ENILimitReachedException":{ "type":"structure", "members":{ @@ -1650,6 +1698,34 @@ "min":0, "pattern":"[a-zA-Z0-9._\\-]+" }, + "FileSystemArn":{ + "type":"string", + "max":200, + "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" + }, + "FileSystemConfig":{ + "type":"structure", + "required":[ + "Arn", + "LocalMountPath" + ], + "members":{ + "Arn":{ + "shape":"FileSystemArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.

" + }, + "LocalMountPath":{ + "shape":"LocalMountPath", + "documentation":"

The path where the function can access the file system, starting with /mnt/.

" + } + }, + "documentation":"

Details about the connection between a Lambda function and an Amazon EFS file system.

" + }, + "FileSystemConfigList":{ + "type":"list", + "member":{"shape":"FileSystemConfig"}, + "max":1 + }, "FunctionArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" @@ -1723,7 +1799,7 @@ }, "Timeout":{ "shape":"Timeout", - "documentation":"

The amount of time that Lambda allows a function to run before stopping it.

" + "documentation":"

The amount of time in seconds that Lambda allows a function to run before stopping it.

" }, "MemorySize":{ "shape":"MemorySize", @@ -1796,6 +1872,10 @@ "LastUpdateStatusReasonCode":{ "shape":"LastUpdateStatusReasonCode", "documentation":"

The reason code for the last update that was performed on the function.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } }, "documentation":"

Details about a function's configuration.

" @@ -2922,6 +3002,11 @@ } } }, + "LocalMountPath":{ + "type":"string", + "max":160, + "pattern":"^/mnt/[a-zA-Z0-9-_.]+$" + }, "LogType":{ "type":"string", "enum":[ @@ -3432,7 +3517,7 @@ "Type":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete a EventSource mapping currently in the UPDATING state.

", + "documentation":"

The operation conflicts with the resource's availability. For example, you attempted to update an EventSource Mapping in CREATING, or tried to delete an EventSource mapping currently in the UPDATING state.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -3733,7 +3818,7 @@ }, "RoutingConfig":{ "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" + "documentation":"

The routing configuration of the alias.

" }, "RevisionId":{ "shape":"String", @@ -3890,6 +3975,10 @@ "Layers":{ "shape":"LayerList", "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" + }, + "FileSystemConfigs":{ + "shape":"FileSystemConfigList", + "documentation":"

Connection settings for an Amazon EFS file system.

" } } }, diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index e27ce68e59fa..69ea1c350f9b 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json index 78808bbe7267..1cf55e1c1406 100644 --- a/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelbuilding/src/main/resources/codegen-resources/service-2.json @@ -1170,6 +1170,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the intent version created.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, for connecting an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" } } }, @@ -2191,6 +2195,10 @@ "checksum":{ "shape":"String", "documentation":"

Checksum of the intent.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, to connect to an Amazon Kendra index with the AMAZON.KendraSearchIntent intent.

" } } }, @@ -2457,7 +2465,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:[\\w\\-]+:iam::[\\d]{12}:role\\/[\\w+=,\\.@\\-]{1,64}$" + "pattern":"^arn:[\\w\\-]+:iam::[\\d]{12}:role/.+$" }, "ImportStatus":{ "type":"string", @@ -2541,6 +2549,34 @@ "exception":true, "fault":true }, + "KendraConfiguration":{ + "type":"structure", + "required":[ + "kendraIndex", + "role" + ], + "members":{ + "kendraIndex":{ + "shape":"KendraIndexArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Kendra index that you want the AMAZON.KendraSearchIntent intent to search. The index must be in the same account and Region as the Amazon Lex bot. If the Amazon Kendra index does not exist, you get an exception when you call the PutIntent operation.

" + }, + "queryFilterString":{ + "shape":"QueryFilterString", + "documentation":"

A query filter that Amazon Lex sends to Amazon Kendra to filter the response from the query. The filter is in the format defined by Amazon Kendra. For more information, see Filtering queries.

You can override this filter string with a new filter string at runtime.

" + }, + "role":{ + "shape":"roleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that has permission to search the Amazon Kendra index. The role must be in the same account and Region as the Amazon Lex bot. If the role does not exist, you get an exception when you call the PutIntent operation.

" + } + }, + "documentation":"

Provides configuration information for the AMAZON.KendraSearchIntent intent. When you use this intent, Amazon Lex searches the specified Amazon Kendra index and returns documents from the index that match the user's utterance. For more information, see AMAZON.KendraSearchIntent.

" + }, + "KendraIndexArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:kendra:[a-z]+-[a-z]+-[0-9]:[0-9]{12}:index\\/[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, "KmsKeyArn":{ "type":"string", "max":2048, @@ -3084,6 +3120,10 @@ "createVersion":{ "shape":"Boolean", "documentation":"

When set to true a new numbered version of the intent is created. This is the same as calling the CreateIntentVersion operation. If you do not specify createVersion, the default is false.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information required to use the AMAZON.KendraSearchIntent intent to connect to an Amazon Kendra index. For more information, see AMAZON.KendraSearchIntent.

" } } }, @@ -3153,6 +3193,10 @@ "createVersion":{ "shape":"Boolean", "documentation":"

True if a new version of the intent was created. If the createVersion field was not specified in the request, the createVersion field is set to false in the response.

" + }, + "kendraConfiguration":{ + "shape":"KendraConfiguration", + "documentation":"

Configuration information, if any, required to connect to an Amazon Kendra index and use the AMAZON.KendraSearchIntent intent.

" } } }, @@ -3245,6 +3289,10 @@ } } }, + "QueryFilterString":{ + "type":"string", + "min":0 + }, "ReferenceType":{ "type":"string", "enum":[ @@ -3344,7 +3392,7 @@ }, "priority":{ "shape":"Priority", - "documentation":"

Directs Lex the order in which to elicit this slot value from the user. For example, if the intent has two slots with priorities 1 and 2, AWS Lex first elicits a value for the slot with priority 1.

If multiple slots share the same priority, the order in which Lex elicits values is arbitrary.

" + "documentation":"

Directs Amazon Lex the order in which to elicit this slot value from the user. For example, if the intent has two slots with priorities 1 and 2, Amazon Lex first elicits a value for the slot with priority 1.

If multiple slots share the same priority, the order in which Amazon Lex elicits values is arbitrary.

" }, "sampleUtterances":{ "shape":"SlotUtteranceList", @@ -3712,6 +3760,12 @@ "max":64, "min":1, "pattern":"\\$LATEST|[0-9]+" + }, + "roleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:iam::[0-9]{12}:role/.*" } }, "documentation":"Amazon Lex Build-Time Actions

Amazon Lex is an AWS service for building conversational voice and text interfaces. Use these actions to create, update, and delete conversational bots for new and existing client applications.

" diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index ebad385ce6d5..e32ec65671ab 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index 9802cb76df2d..24b987dd9add 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 55e3136160e7..117867d75379 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/service-2.json b/services/lightsail/src/main/resources/codegen-resources/service-2.json index 3e8396af5806..edd9f1cd8e78 100644 --- a/services/lightsail/src/main/resources/codegen-resources/service-2.json +++ b/services/lightsail/src/main/resources/codegen-resources/service-2.json @@ -124,7 +124,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Closes the public ports on a specific Amazon Lightsail instance.

The close instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Closes ports for a specific Amazon Lightsail instance.

The CloseInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "CopySnapshot":{ "name":"CopySnapshot", @@ -1179,7 +1179,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.

" + "documentation":"

Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetInstancePortStates":{ "name":"GetInstancePortStates", @@ -1198,7 +1198,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the port states for a specific virtual private server, or instance.

" + "documentation":"

Returns the firewall port states for a specific Amazon Lightsail instance, the IP addresses allowed to connect to the instance through the ports, and the protocol.

" }, "GetInstanceSnapshot":{ "name":"GetInstanceSnapshot", @@ -1350,7 +1350,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns information about health metrics for your Lightsail load balancer.

" + "documentation":"

Returns information about health metrics for your Lightsail load balancer.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetLoadBalancerTlsCertificates":{ "name":"GetLoadBalancerTlsCertificates", @@ -1616,7 +1616,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Returns the data points of the specified metric for a database in Amazon Lightsail.

" + "documentation":"

Returns the data points of the specified metric for a database in Amazon Lightsail.

Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.

" }, "GetRelationalDatabaseParameters":{ "name":"GetRelationalDatabaseParameters", @@ -1787,7 +1787,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Adds public ports to an Amazon Lightsail instance.

The open instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol.

The OpenInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "PeerVpc":{ "name":"PeerVpc", @@ -1843,7 +1843,7 @@ {"shape":"AccountSetupInProgressException"}, {"shape":"UnauthenticatedException"} ], - "documentation":"

Sets the specified open ports for an Amazon Lightsail instance, and closes all ports for every protocol not included in the current request.

The put instance public ports operation supports tag-based access control via resource tags applied to the resource identified by instance name. For more information, see the Lightsail Dev Guide.

" + "documentation":"

Opens ports for a specific Amazon Lightsail instance, and specifies the IP addresses allowed to connect to the instance through the ports, and the protocol. This action also closes all currently open ports that are not included in the request. Include all of the ports and the protocols you want to open in your PutInstancePublicPorts request. Or use the OpenInstancePublicPorts action to open ports without closing currently open ports.

The PutInstancePublicPorts action supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the Lightsail Dev Guide.

" }, "RebootInstance":{ "name":"RebootInstance", @@ -1918,7 +1918,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NotFoundException"} ], - "documentation":"

Sends a verification request to an email contact method to ensure it’s owned by the requester. SMS contact methods don’t need to be verified.

A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.

A verification request is sent to the contact method when you initially create it. Use this action to send another verification request if a previous verification request was deleted, or has expired.

Notifications are not sent to an email contact method until after it is verified, and confirmed as valid.

" + "documentation":"

Sends a verification request to an email contact method to ensure it's owned by the requester. SMS contact methods don't need to be verified.

A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.

A verification request is sent to the contact method when you initially create it. Use this action to send another verification request if a previous verification request was deleted, or has expired.

Notifications are not sent to an email contact method until after it is verified, and confirmed as valid.

" }, "StartInstance":{ "name":"StartInstance", @@ -2280,11 +2280,11 @@ }, "treatMissingData":{ "shape":"TreatMissingData", - "documentation":"

Specifies how the alarm handles missing data points.

An alarm can treat missing data in the following ways:

  • breaching — Assume the missing data is not within the threshold. Missing data counts towards the number of times the metric is not within the threshold.

  • notBreaching — Assume the missing data is within the threshold. Missing data does not count towards the number of times the metric is not within the threshold.

  • ignore — Ignore the missing data. Maintains the current alarm state.

  • missing — Missing data is treated as missing.

" + "documentation":"

Specifies how the alarm handles missing data points.

An alarm can treat missing data in the following ways:

  • breaching - Assume the missing data is not within the threshold. Missing data counts towards the number of times the metric is not within the threshold.

  • notBreaching - Assume the missing data is within the threshold. Missing data does not count towards the number of times the metric is not within the threshold.

  • ignore - Ignore the missing data. Maintains the current alarm state.

  • missing - Missing data is treated as missing.

" }, "statistic":{ "shape":"MetricStatistic", - "documentation":"

The statistic for the metric associated with the alarm.

The following statistics are available:

  • Minimum — The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum — The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum — All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average — The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount — The count, or number, of data points used for the statistical calculation.

" + "documentation":"

The statistic for the metric associated with the alarm.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.

" }, "metricName":{ "shape":"MetricName", @@ -2292,7 +2292,7 @@ }, "state":{ "shape":"AlarmState", - "documentation":"

The current state of the alarm.

An alarm has the following possible states:

  • ALARM — The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA — The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK — The metric is within the defined threshold.

" + "documentation":"

The current state of the alarm.

An alarm has the following possible states:

  • ALARM - The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK - The metric is within the defined threshold.

" }, "unit":{ "shape":"MetricUnit", @@ -2340,7 +2340,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2371,7 +2371,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2397,7 +2397,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2423,7 +2423,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

These SSL/TLS certificates are only usable by Lightsail load balancers. You can't get the certificate and use it for another purpose.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

These SSL/TLS certificates are only usable by Lightsail load balancers. You can't get the certificate and use it for another purpose.

" } } }, @@ -2449,7 +2449,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2614,7 +2614,7 @@ "members":{ "price":{ "shape":"float", - "documentation":"

The price in US dollars (e.g., 5.0).

" + "documentation":"

The price in US dollars (e.g., 5.0) of the bundle.

" }, "cpuCount":{ "shape":"integer", @@ -2672,11 +2672,11 @@ "members":{ "portInfo":{ "shape":"PortInfo", - "documentation":"

Information about the public port you are trying to close.

" + "documentation":"

An object to describe the ports to close for the specified instance.

" }, "instanceName":{ "shape":"ResourceName", - "documentation":"

The name of the instance on which you're attempting to close the public ports.

" + "documentation":"

The name of the instance for which to close ports.

" } } }, @@ -2685,7 +2685,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An object that describes the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2775,7 +2775,7 @@ }, "status":{ "shape":"ContactMethodStatus", - "documentation":"

The current status of the contact method.

A contact method has the following possible status:

  • PendingVerification — The contact method has not yet been verified, and the verification has not yet expired.

  • Valid — The contact method has been verified.

  • InValid — An attempt was made to verify the contact method, but the verification has expired.

" + "documentation":"

The current status of the contact method.

A contact method has the following possible status:

  • PendingVerification - The contact method has not yet been verified, and the verification has not yet expired.

  • Valid - The contact method has been verified.

  • InValid - An attempt was made to verify the contact method, but the verification has expired.

" }, "protocol":{ "shape":"ContactProtocol", @@ -2870,7 +2870,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2889,7 +2889,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2906,7 +2906,7 @@ }, "contactEndpoint":{ "shape":"StringMax256", - "documentation":"

The destination of the contact method, such as an email address or a mobile phone number.

Use the E.164 format when specifying a mobile phone number. E.164 is a standard for the phone number structure used for international telecommunication. Phone numbers that follow this format can have a maximum of 15 digits, and they are prefixed with the plus character (+) and the country code. For example, a U.S. phone number in E.164 format would be specified as +1XXX5550100. For more information, see E.164 in Wikipedia.

" + "documentation":"

The destination of the contact method, such as an email address or a mobile phone number.

Use the E.164 format when specifying a mobile phone number. E.164 is a standard for the phone number structure used for international telecommunication. Phone numbers that follow this format can have a maximum of 15 digits, and they are prefixed with the plus character (+) and the country code. For example, a U.S. phone number in E.164 format would be specified as +1XXX5550100. For more information, see E.164 on Wikipedia.

" } } }, @@ -2915,7 +2915,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -2970,7 +2970,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3009,7 +3009,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3040,7 +3040,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3066,7 +3066,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3089,7 +3089,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3119,7 +3119,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3186,7 +3186,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3243,7 +3243,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3278,7 +3278,7 @@ }, "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3324,7 +3324,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3363,7 +3363,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3414,7 +3414,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3479,7 +3479,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3509,7 +3509,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3528,7 +3528,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3554,7 +3554,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3573,7 +3573,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3596,7 +3596,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3615,7 +3615,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3641,7 +3641,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3660,7 +3660,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3683,7 +3683,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3702,7 +3702,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3721,7 +3721,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3740,7 +3740,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3759,7 +3759,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3789,7 +3789,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3816,7 +3816,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3835,7 +3835,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3868,7 +3868,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3894,7 +3894,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3913,7 +3913,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -3939,7 +3939,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -4295,7 +4295,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -4401,7 +4401,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -4762,7 +4762,7 @@ }, "metricName":{ "shape":"InstanceMetricName", - "documentation":"

The metric for which you want to return information.

Valid instance metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

  • CPUUtilization — The percentage of allocated compute units that are currently in use on the instance. This metric identifies the processing power to run the applications on the instance. Tools in your operating system can show a lower percentage than Lightsail when the instance is not allocated a full processor core.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • NetworkIn — The number of bytes received on all network interfaces by the instance. This metric identifies the volume of incoming network traffic to the instance. The number reported is the number of bytes received during the period. Because this metric is reported in 5-minute intervals, divide the reported number by 300 to find Bytes/second.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • NetworkOut — The number of bytes sent out on all network interfaces by the instance. This metric identifies the volume of outgoing network traffic from the instance. The number reported is the number of bytes sent during the period. Because this metric is reported in 5-minute intervals, divide the reported number by 300 to find Bytes/second.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • StatusCheckFailed — Reports whether the instance passed or failed both the instance status check and the system status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • StatusCheckFailed_Instance — Reports whether the instance passed or failed the instance status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • StatusCheckFailed_System — Reports whether the instance passed or failed the system status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

" + "documentation":"

The metric for which you want to return information.

Valid instance metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

  • BurstCapacityPercentage - The percentage of CPU performance available for your instance to burst above its baseline. Your instance continuously accrues and consumes burst capacity. Burst capacity stops accruing when your instance's BurstCapacityPercentage reaches 100%. For more information, see Viewing instance burst capacity in Amazon Lightsail.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • BurstCapacityTime - The available amount of time for your instance to burst at 100% CPU utilization. Your instance continuously accrues and consumes burst capacity. Burst capacity time stops accruing when your instance's BurstCapacityPercentage metric reaches 100%.

    Burst capacity time is consumed at the full rate only when your instance operates at 100% CPU utilization. For example, if your instance operates at 50% CPU utilization in the burstable zone for a 5-minute period, then it consumes CPU burst capacity minutes at a 50% rate in that period. Your instance consumed 2 minutes and 30 seconds of CPU burst capacity minutes in the 5-minute period. For more information, see Viewing instance burst capacity in Amazon Lightsail.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Seconds.

  • CPUUtilization - The percentage of allocated compute units that are currently in use on the instance. This metric identifies the processing power to run the applications on the instance. Tools in your operating system can show a lower percentage than Lightsail when the instance is not allocated a full processor core.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • NetworkIn - The number of bytes received on all network interfaces by the instance. This metric identifies the volume of incoming network traffic to the instance. The number reported is the number of bytes received during the period. Because this metric is reported in 5-minute intervals, divide the reported number by 300 to find Bytes/second.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • NetworkOut - The number of bytes sent out on all network interfaces by the instance. This metric identifies the volume of outgoing network traffic from the instance. The number reported is the number of bytes sent during the period. Because this metric is reported in 5-minute intervals, divide the reported number by 300 to find Bytes/second.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • StatusCheckFailed - Reports whether the instance passed or failed both the instance status check and the system status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • StatusCheckFailed_Instance - Reports whether the instance passed or failed the instance status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • StatusCheckFailed_System - Reports whether the instance passed or failed the system status check. This metric can be either 0 (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) granularity.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

" }, "period":{ "shape":"MetricPeriod", @@ -4778,11 +4778,11 @@ }, "unit":{ "shape":"MetricUnit", - "documentation":"

The unit for the metric data request. Valid units depend on the metric data being required. For the valid units with each available metric, see the metricName parameter.

" + "documentation":"

The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units to specify with each available metric, see the metricName parameter.

" }, "statistics":{ "shape":"MetricStatisticList", - "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum — The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum — The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum — All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average — The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount — The count, or number, of data points used for the statistical calculation.

" + "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.

" } } }, @@ -4791,11 +4791,11 @@ "members":{ "metricName":{ "shape":"InstanceMetricName", - "documentation":"

The metric name to return data for.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An array of key-value pairs containing information about the results of your get instance metric data request.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -4805,7 +4805,7 @@ "members":{ "instanceName":{ "shape":"ResourceName", - "documentation":"

The name of the instance.

" + "documentation":"

The name of the instance for which to return firewall port states.

" } } }, @@ -4814,7 +4814,7 @@ "members":{ "portStates":{ "shape":"InstancePortStateList", - "documentation":"

Information about the port states resulting from your request.

" + "documentation":"

An array of objects that describe the firewall port states for the specified instance.

" } } }, @@ -4978,7 +4978,7 @@ }, "metricName":{ "shape":"LoadBalancerMetricName", - "documentation":"

The metric for which you want to return information.

Valid load balancer metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

  • ClientTLSNegotiationErrorCount — The number of TLS connections initiated by the client that did not establish a session with the load balancer due to a TLS error generated by the load balancer. Possible causes include a mismatch of ciphers or protocols.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • HealthyHostCount — The number of target instances that are considered healthy.

    Statistics: The most useful statistic are Average, Minimum, and Maximum.

    Unit: The published unit is Count.

  • HTTPCode_Instance_2XX_Count — The number of HTTP 2XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_3XX_Count — The number of HTTP 3XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_4XX_Count — The number of HTTP 4XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_5XX_Count — The number of HTTP 5XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_LB_4XX_Count — The number of HTTP 4XX client error codes that originated from the load balancer. Client errors are generated when requests are malformed or incomplete. These requests were not received by the target instance. This count does not include response codes generated by the target instances.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_LB_5XX_Count — The number of HTTP 5XX server error codes that originated from the load balancer. This does not include any response codes generated by the target instance. This metric is reported if there are no healthy instances attached to the load balancer, or if the request rate exceeds the capacity of the instances (spillover) or the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • InstanceResponseTime — The time elapsed, in seconds, after the request leaves the load balancer until a response from the target instance is received.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Seconds.

  • RejectedConnectionCount — The number of connections that were rejected because the load balancer had reached its maximum number of connections.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • RequestCount — The number of requests processed over IPv4. This count includes only the requests with a response generated by a target instance of the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • UnhealthyHostCount — The number of target instances that are considered unhealthy.

    Statistics: The most useful statistic are Average, Minimum, and Maximum.

    Unit: The published unit is Count.

" + "documentation":"

The metric for which you want to return information.

Valid load balancer metric names are listed below, along with the most useful statistics to include in your request, and the published unit value.

  • ClientTLSNegotiationErrorCount - The number of TLS connections initiated by the client that did not establish a session with the load balancer due to a TLS error generated by the load balancer. Possible causes include a mismatch of ciphers or protocols.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • HealthyHostCount - The number of target instances that are considered healthy.

    Statistics: The most useful statistics are Average, Minimum, and Maximum.

    Unit: The published unit is Count.

  • HTTPCode_Instance_2XX_Count - The number of HTTP 2XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_3XX_Count - The number of HTTP 3XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_4XX_Count - The number of HTTP 4XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_Instance_5XX_Count - The number of HTTP 5XX response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that originated from the load balancer. Client errors are generated when requests are malformed or incomplete. These requests were not received by the target instance. This count does not include response codes generated by the target instances.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that originated from the load balancer. This does not include any response codes generated by the target instance. This metric is reported if there are no healthy instances attached to the load balancer, or if the request rate exceeds the capacity of the instances (spillover) or the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • InstanceResponseTime - The time elapsed, in seconds, after the request leaves the load balancer until a response from the target instance is received.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Seconds.

  • RejectedConnectionCount - The number of connections that were rejected because the load balancer had reached its maximum number of connections.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • RequestCount - The number of requests processed over IPv4. This count includes only the requests with a response generated by a target instance of the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

    Unit: The published unit is Count.

  • UnhealthyHostCount - The number of target instances that are considered unhealthy.

    Statistics: The most useful statistics are Average, Minimum, and Maximum.

    Unit: The published unit is Count.

" }, "period":{ "shape":"MetricPeriod", @@ -4994,11 +4994,11 @@ }, "unit":{ "shape":"MetricUnit", - "documentation":"

The unit for the metric data request. Valid units depend on the metric data being required. For the valid units with each available metric, see the metricName parameter.

" + "documentation":"

The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName parameter.

" }, "statistics":{ "shape":"MetricStatisticList", - "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum — The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum — The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum — All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average — The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount — The count, or number, of data points used for the statistical calculation.

" + "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.

" } } }, @@ -5007,11 +5007,11 @@ "members":{ "metricName":{ "shape":"LoadBalancerMetricName", - "documentation":"

The metric about which you are receiving information. Valid values are listed below, along with the most useful statistics to include in your request.

  • ClientTLSNegotiationErrorCount - The number of TLS connections initiated by the client that did not establish a session with the load balancer. Possible causes include a mismatch of ciphers or protocols.

    Statistics: The most useful statistic is Sum.

  • HealthyHostCount - The number of target instances that are considered healthy.

    Statistics: The most useful statistic are Average, Minimum, and Maximum.

  • UnhealthyHostCount - The number of target instances that are considered unhealthy.

    Statistics: The most useful statistic are Average, Minimum, and Maximum.

  • HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that originate from the load balancer. Client errors are generated when requests are malformed or incomplete. These requests have not been received by the target instance. This count does not include any response codes generated by the target instances.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

  • HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that originate from the load balancer. This count does not include any response codes generated by the target instances.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1. Note that Minimum, Maximum, and Average all return 1.

  • HTTPCode_Instance_2XX_Count - The number of HTTP response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

  • HTTPCode_Instance_3XX_Count - The number of HTTP response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

  • HTTPCode_Instance_4XX_Count - The number of HTTP response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

  • HTTPCode_Instance_5XX_Count - The number of HTTP response codes generated by the target instances. This does not include any response codes generated by the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

  • InstanceResponseTime - The time elapsed, in seconds, after the request leaves the load balancer until a response from the target instance is received.

    Statistics: The most useful statistic is Average.

  • RejectedConnectionCount - The number of connections that were rejected because the load balancer had reached its maximum number of connections.

    Statistics: The most useful statistic is Sum.

  • RequestCount - The number of requests processed over IPv4. This count includes only the requests with a response generated by a target instance of the load balancer.

    Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and Average all return 1.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An array of metric datapoint objects.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -5090,7 +5090,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -5113,7 +5113,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" }, "nextPageCount":{ "shape":"string", @@ -5140,7 +5140,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" }, "nextPageToken":{ "shape":"string", @@ -5359,7 +5359,7 @@ }, "metricName":{ "shape":"RelationalDatabaseMetricName", - "documentation":"

The metric for which you want to return information.

Valid relational database metric names are listed below, along with the most useful statistics to include in your request, and the published unit value. All relational database metric data is available in 1-minute (60 seconds) granularity.

  • CPUUtilization — The percentage of CPU utilization currently in use on the database.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • DatabaseConnections — The number of database connections in use.

    Statistics: The most useful statistics are Maximum and Sum.

    Unit: The published unit is Count.

  • DiskQueueDepth — The number of outstanding IOs (read/write requests) that are waiting to access the disk.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • FreeStorageSpace — The amount of available storage space.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • NetworkReceiveThroughput — The incoming (Receive) network traffic on the database, including both customer database traffic and AWS traffic used for monitoring and replication.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Bytes/Second.

  • NetworkTransmitThroughput — The outgoing (Transmit) network traffic on the database, including both customer database traffic and AWS traffic used for monitoring and replication.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Bytes/Second.

" + "documentation":"

The metric for which you want to return information.

Valid relational database metric names are listed below, along with the most useful statistics to include in your request, and the published unit value. All relational database metric data is available in 1-minute (60 seconds) granularity.

  • CPUUtilization - The percentage of CPU utilization currently in use on the database.

    Statistics: The most useful statistics are Maximum and Average.

    Unit: The published unit is Percent.

  • DatabaseConnections - The number of database connections in use.

    Statistics: The most useful statistics are Maximum and Sum.

    Unit: The published unit is Count.

  • DiskQueueDepth - The number of outstanding IOs (read/write requests) that are waiting to access the disk.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Count.

  • FreeStorageSpace - The amount of available storage space.

    Statistics: The most useful statistic is Sum.

    Unit: The published unit is Bytes.

  • NetworkReceiveThroughput - The incoming (Receive) network traffic on the database, including both customer database traffic and AWS traffic used for monitoring and replication.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Bytes/Second.

  • NetworkTransmitThroughput - The outgoing (Transmit) network traffic on the database, including both customer database traffic and AWS traffic used for monitoring and replication.

    Statistics: The most useful statistic is Average.

    Unit: The published unit is Bytes/Second.

" }, "period":{ "shape":"MetricPeriod", @@ -5375,11 +5375,11 @@ }, "unit":{ "shape":"MetricUnit", - "documentation":"

The unit for the metric data request. Valid units depend on the metric data being required. For the valid units with each available metric, see the metricName parameter.

" + "documentation":"

The unit for the metric data request. Valid units depend on the metric data being requested. For the valid units with each available metric, see the metricName parameter.

" }, "statistics":{ "shape":"MetricStatisticList", - "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum — The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum — The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum — All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average — The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount — The count, or number, of data points used for the statistical calculation.

" + "documentation":"

The statistic for the metric.

The following statistics are available:

  • Minimum - The lowest value observed during the specified period. Use this value to determine low volumes of activity for your application.

  • Maximum - The highest value observed during the specified period. Use this value to determine high volumes of activity for your application.

  • Sum - All values submitted for the matching metric added together. You can use this statistic to determine the total volume of a metric.

  • Average - The value of Sum / SampleCount during the specified period. By comparing this statistic with the Minimum and Maximum values, you can determine the full scope of a metric and how close the average use is to the Minimum and Maximum values. This comparison helps you to know when to increase or decrease your resources.

  • SampleCount - The count, or number, of data points used for the statistical calculation.

" } } }, @@ -5388,11 +5388,11 @@ "members":{ "metricName":{ "shape":"RelationalDatabaseMetricName", - "documentation":"

The name of the metric.

" + "documentation":"

The name of the metric returned.

" }, "metricData":{ "shape":"MetricDatapointList", - "documentation":"

An object describing the result of your get relational database metric data request.

" + "documentation":"

An array of objects that describe the metric data returned.

" } } }, @@ -5606,7 +5606,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -5768,7 +5768,7 @@ }, "portInfoSource":{ "shape":"PortInfoSourceType", - "documentation":"

The port configuration to use for the new Amazon EC2 instance.

The following configuration options are available:

  • DEFAULT — Use the default firewall settings from the image.

  • INSTANCE — Use the firewall settings from the source Lightsail instance.

  • NONE — Default to Amazon EC2.

  • CLOSED — All ports closed.

" + "documentation":"

The port configuration to use for the new Amazon EC2 instance.

The following configuration options are available:

  • DEFAULT - Use the default firewall settings from the Lightsail instance blueprint.

  • INSTANCE - Use the configured firewall settings from the source Lightsail instance.

  • NONE - Use the default Amazon EC2 security group.

  • CLOSED - All ports closed.

If you configured lightsail-connect as a cidrListAliases on your instance, or if you chose to allow the Lightsail browser-based SSH or RDP clients to connect to your instance, that configuration is not carried over to your new Amazon EC2 instance.

" }, "userData":{ "shape":"string", @@ -5864,7 +5864,9 @@ "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", - "StatusCheckFailed_System" + "StatusCheckFailed_System", + "BurstCapacityTime", + "BurstCapacityPercentage" ] }, "InstanceNetworking":{ @@ -5897,19 +5899,19 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in the range.

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP type. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in the range.

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The protocol being used. Can be one of the following.

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" }, "accessFrom":{ "shape":"string", - "documentation":"

The location from which access is allowed (e.g., Anywhere (0.0.0.0/0)).

" + "documentation":"

The location from which access is allowed. For example, Anywhere (0.0.0.0/0), or Custom if a specific IP address or range of IP addresses is allowed.

" }, "accessType":{ "shape":"PortAccessType", @@ -5917,14 +5919,22 @@ }, "commonName":{ "shape":"string", - "documentation":"

The common name.

" + "documentation":"

The common name of the port information.

" }, "accessDirection":{ "shape":"AccessDirection", - "documentation":"

The access direction (inbound or outbound).

" + "documentation":"

The access direction (inbound or outbound).

Lightsail currently supports only inbound access direction.

" + }, + "cidrs":{ + "shape":"StringList", + "documentation":"

The IP address, or range of IP addresses in CIDR notation, that are allowed to connect to an instance through the ports, and the protocol. Lightsail supports IPv4 addresses.

For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia.

" + }, + "cidrListAliases":{ + "shape":"StringList", + "documentation":"

An alias that defines access for a preconfigured range of IP addresses.

The only alias currently supported is lightsail-connect, which allows IP addresses of the browser-based RDP/SSH client in the Lightsail console to connect to your instance.

" } }, - "documentation":"

Describes information about the instance ports.

" + "documentation":"

Describes information about ports for an Amazon Lightsail instance.

" }, "InstancePortInfoList":{ "type":"list", @@ -5935,22 +5945,30 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in the range.

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP type. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in the range.

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The protocol being used. Can be one of the following.

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" }, "state":{ "shape":"PortState", - "documentation":"

Specifies whether the instance port is open or closed.

" + "documentation":"

Specifies whether the instance port is open or closed.

The port state for Lightsail instances is always open.

" + }, + "cidrs":{ + "shape":"StringList", + "documentation":"

The IP address, or range of IP addresses in CIDR notation, that are allowed to connect to an instance through the ports, and the protocol. Lightsail supports IPv4 addresses.

For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia.

" + }, + "cidrListAliases":{ + "shape":"StringList", + "documentation":"

An alias that defines access for a preconfigured range of IP addresses.

The only alias currently supported is lightsail-connect, which allows IP addresses of the browser-based RDP/SSH client in the Lightsail console to connect to your instance.

" } }, - "documentation":"

Describes the port state.

" + "documentation":"

Describes open ports on an instance, the IP addresses allowed to connect to the instance through the ports, and the protocol.

" }, "InstancePortStateList":{ "type":"list", @@ -6308,7 +6326,7 @@ }, "status":{ "shape":"LoadBalancerTlsCertificateStatus", - "documentation":"

The status of the SSL/TLS certificate. Valid values are below.

" + "documentation":"

The validation status of the SSL/TLS certificate. Valid values are listed below.

" }, "domainName":{ "shape":"DomainName", @@ -6593,7 +6611,9 @@ "DiskQueueDepth", "FreeStorageSpace", "NetworkReceiveThroughput", - "NetworkTransmitThroughput" + "NetworkTransmitThroughput", + "BurstCapacityTime", + "BurstCapacityPercentage" ] }, "MetricPeriod":{ @@ -6680,7 +6700,8 @@ "enum":[ "tcp", "all", - "udp" + "udp", + "icmp" ] }, "NonEmptyString":{ @@ -6711,11 +6732,11 @@ "members":{ "portInfo":{ "shape":"PortInfo", - "documentation":"

An array of key-value pairs containing information about the port mappings.

" + "documentation":"

An object that describes the ports to open for the specified instance.

" }, "instanceName":{ "shape":"ResourceName", - "documentation":"

The name of the instance for which you want to open the public ports.

" + "documentation":"

The name of the instance for which to open ports.

" } } }, @@ -6724,7 +6745,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -6891,7 +6912,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -6938,7 +6959,7 @@ "Port":{ "type":"integer", "max":65535, - "min":0 + "min":-1 }, "PortAccessType":{ "type":"string", @@ -6952,18 +6973,26 @@ "members":{ "fromPort":{ "shape":"Port", - "documentation":"

The first port in the range.

" + "documentation":"

The first port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP type. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "toPort":{ "shape":"Port", - "documentation":"

The last port in the range.

" + "documentation":"

The last port in a range of open ports on an instance.

Allowed ports:

  • TCP and UDP - 0 to 65535

  • ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia.

" }, "protocol":{ "shape":"NetworkProtocol", - "documentation":"

The protocol.

" + "documentation":"

The IP protocol name.

The name can be one of the following:

  • tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead.

  • all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia.

  • udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead.

  • icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter.

" + }, + "cidrs":{ + "shape":"StringList", + "documentation":"

The IP address, or range of IP addresses in CIDR notation, that are allowed to connect to an instance through the ports, and the protocol. Lightsail supports IPv4 addresses.

Examples:

  • To allow the IP address 192.0.2.44, specify 192.0.2.44 or 192.0.2.44/32.

  • To allow the IP addresses 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia.

" + }, + "cidrListAliases":{ + "shape":"StringList", + "documentation":"

An alias that defines access for a preconfigured range of IP addresses.

The only alias currently supported is lightsail-connect, which allows IP addresses of the browser-based RDP/SSH client in the Lightsail console to connect to your instance.

" } }, - "documentation":"

Describes information about the ports on your virtual private server (or instance).

" + "documentation":"

Describes ports to open on an instance, the IP addresses allowed to connect to the instance through the ports, and the protocol.

" }, "PortInfoList":{ "type":"list", @@ -7006,7 +7035,7 @@ }, "metricName":{ "shape":"MetricName", - "documentation":"

The name of the metric to associate with the alarm.

You can configure up to two alarms per metric.

The following metrics are available for each resource type:

  • Instances: CPUUtilization, NetworkIn, NetworkOut, StatusCheckFailed, StatusCheckFailed_Instance, and StatusCheckFailed_System.

  • Load balancers: ClientTLSNegotiationErrorCount, HealthyHostCount, UnhealthyHostCount, HTTPCode_LB_4XX_Count, HTTPCode_LB_5XX_Count, HTTPCode_Instance_2XX_Count, HTTPCode_Instance_3XX_Count, HTTPCode_Instance_4XX_Count, HTTPCode_Instance_5XX_Count, InstanceResponseTime, RejectedConnectionCount, and RequestCount.

  • Relational databases: CPUUtilization, DatabaseConnections, DiskQueueDepth, FreeStorageSpace, NetworkReceiveThroughput, and NetworkTransmitThroughput.

" + "documentation":"

The name of the metric to associate with the alarm.

You can configure up to two alarms per metric.

The following metrics are available for each resource type:

  • Instances: BurstCapacityPercentage, BurstCapacityTime, CPUUtilization, NetworkIn, NetworkOut, StatusCheckFailed, StatusCheckFailed_Instance, and StatusCheckFailed_System.

  • Load balancers: ClientTLSNegotiationErrorCount, HealthyHostCount, UnhealthyHostCount, HTTPCode_LB_4XX_Count, HTTPCode_LB_5XX_Count, HTTPCode_Instance_2XX_Count, HTTPCode_Instance_3XX_Count, HTTPCode_Instance_4XX_Count, HTTPCode_Instance_5XX_Count, InstanceResponseTime, RejectedConnectionCount, and RequestCount.

  • Relational databases: CPUUtilization, DatabaseConnections, DiskQueueDepth, FreeStorageSpace, NetworkReceiveThroughput, and NetworkTransmitThroughput.

For more information about these metrics, see Metrics available in Lightsail.

" }, "monitoredResourceName":{ "shape":"ResourceName", @@ -7030,7 +7059,7 @@ }, "treatMissingData":{ "shape":"TreatMissingData", - "documentation":"

Sets how this alarm will handle missing data points.

An alarm can treat missing data in the following ways:

  • breaching — Assume the missing data is not within the threshold. Missing data counts towards the number of times the metric is not within the threshold.

  • notBreaching — Assume the missing data is within the threshold. Missing data does not count towards the number of times the metric is not within the threshold.

  • ignore — Ignore the missing data. Maintains the current alarm state.

  • missing — Missing data is treated as missing.

If treatMissingData is not specified, the default behavior of missing is used.

" + "documentation":"

Sets how this alarm will handle missing data points.

An alarm can treat missing data in the following ways:

  • breaching - Assume the missing data is not within the threshold. Missing data counts towards the number of times the metric is not within the threshold.

  • notBreaching - Assume the missing data is within the threshold. Missing data does not count towards the number of times the metric is not within the threshold.

  • ignore - Ignore the missing data. Maintains the current alarm state.

  • missing - Missing data is treated as missing.

If treatMissingData is not specified, the default behavior of missing is used.

" }, "contactProtocols":{ "shape":"ContactProtocolsList", @@ -7038,7 +7067,7 @@ }, "notificationTriggers":{ "shape":"NotificationTriggerList", - "documentation":"

The alarm states that trigger a notification.

An alarm has the following possible states:

  • ALARM — The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA — The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK — The metric is within the defined threshold.

When you specify a notification trigger, the ALARM state must be specified. The INSUFFICIENT_DATA and OK states can be specified in addition to the ALARM state.

  • If you specify OK as an alarm trigger, a notification is sent when the alarm switches from an ALARM or INSUFFICIENT_DATA alarm state to an OK state. This can be thought of as an all clear alarm notification.

  • If you specify INSUFFICIENT_DATA as the alarm trigger, a notification is sent when the alarm switches from an OK or ALARM alarm state to an INSUFFICIENT_DATA state.

The notification trigger defaults to ALARM if you don't specify this parameter.

" + "documentation":"

The alarm states that trigger a notification.

An alarm has the following possible states:

  • ALARM - The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK - The metric is within the defined threshold.

When you specify a notification trigger, the ALARM state must be specified. The INSUFFICIENT_DATA and OK states can be specified in addition to the ALARM state.

  • If you specify OK as an alarm trigger, a notification is sent when the alarm switches from an ALARM or INSUFFICIENT_DATA alarm state to an OK state. This can be thought of as an all clear alarm notification.

  • If you specify INSUFFICIENT_DATA as the alarm trigger, a notification is sent when the alarm switches from an OK or ALARM alarm state to an INSUFFICIENT_DATA state.

The notification trigger defaults to ALARM if you don't specify this parameter.

" }, "notificationEnabled":{ "shape":"boolean", @@ -7051,7 +7080,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7064,11 +7093,11 @@ "members":{ "portInfos":{ "shape":"PortInfoList", - "documentation":"

Specifies information about the public port(s).

" + "documentation":"

An array of objects that describe the ports to open for the specified instance.

" }, "instanceName":{ "shape":"ResourceName", - "documentation":"

The Lightsail instance name of the public port(s) you are setting.

" + "documentation":"

The name of the instance for which to open ports.

" } } }, @@ -7077,7 +7106,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7096,7 +7125,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7115,7 +7144,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7582,7 +7611,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7648,7 +7677,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7683,7 +7712,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7702,7 +7731,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7771,7 +7800,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7794,7 +7823,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7856,7 +7885,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7874,7 +7903,7 @@ }, "state":{ "shape":"AlarmState", - "documentation":"

The alarm state to test.

An alarm has the following possible states that can be tested:

  • ALARM — The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA — The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK — The metric is within the defined threshold.

" + "documentation":"

The alarm state to test.

An alarm has the following possible states that can be tested:

  • ALARM - The metric is outside of the defined threshold.

  • INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or not enough data is available for the metric to determine the alarm state.

  • OK - The metric is within the defined threshold.

" } } }, @@ -7883,7 +7912,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7921,7 +7950,7 @@ "members":{ "operation":{ "shape":"Operation", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7951,7 +7980,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -7977,7 +8006,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -8008,7 +8037,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -8034,7 +8063,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, @@ -8089,7 +8118,7 @@ "members":{ "operations":{ "shape":"OperationList", - "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the time stamp of the request, and the resources affected by the request.

" + "documentation":"

An array of objects that describe the result of the action, such as the status of the request, the timestamp of the request, and the resources affected by the request.

" } } }, diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 5aee1c694b4b..878a09001eb2 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 6fb103579a85..d1699f997f24 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie/src/main/resources/codegen-resources/paginators-1.json b/services/macie/src/main/resources/codegen-resources/paginators-1.json index 0b3465fca728..ec567c5ae402 100644 --- a/services/macie/src/main/resources/codegen-resources/paginators-1.json +++ b/services/macie/src/main/resources/codegen-resources/paginators-1.json @@ -1,14 +1,14 @@ { - "pagination":{ - "ListMemberAccounts":{ - "input_token":"nextToken", - "output_token":"nextToken", - "limit_key":"maxResults" + "pagination": { + "ListMemberAccounts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" }, - "ListS3Resources":{ - "input_token":"nextToken", - "output_token":"nextToken", - "limit_key":"maxResults" + "ListS3Resources": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" } } } diff --git a/services/macie/src/main/resources/codegen-resources/service-2.json b/services/macie/src/main/resources/codegen-resources/service-2.json index c3114b69b02d..194313b76aa0 100644 --- a/services/macie/src/main/resources/codegen-resources/service-2.json +++ b/services/macie/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalException"} ], - "documentation":"

Associates a specified AWS account with Amazon Macie as a member account.

" + "documentation":"

Associates a specified AWS account with Amazon Macie Classic as a member account.

" }, "AssociateS3Resources":{ "name":"AssociateS3Resources", @@ -40,7 +40,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalException"} ], - "documentation":"

Associates specified S3 resources with Amazon Macie for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie for the current master account. If memberAccountId is specified, the action associates specified S3 resources with Macie for the specified member account.

" + "documentation":"

Associates specified S3 resources with Amazon Macie Classic for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie Classic for the current master account. If memberAccountId is specified, the action associates specified S3 resources with Macie Classic for the specified member account.

" }, "DisassociateMemberAccount":{ "name":"DisassociateMemberAccount", @@ -53,7 +53,7 @@ {"shape":"InvalidInputException"}, {"shape":"InternalException"} ], - "documentation":"

Removes the specified member account from Amazon Macie.

" + "documentation":"

Removes the specified member account from Amazon Macie Classic.

" }, "DisassociateS3Resources":{ "name":"DisassociateS3Resources", @@ -68,7 +68,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Removes specified S3 resources from being monitored by Amazon Macie. If memberAccountId isn't specified, the action removes specified S3 resources from Macie for the current master account. If memberAccountId is specified, the action removes specified S3 resources from Macie for the specified member account.

" + "documentation":"

Removes specified S3 resources from being monitored by Amazon Macie Classic. If memberAccountId isn't specified, the action removes specified S3 resources from Macie Classic for the current master account. If memberAccountId is specified, the action removes specified S3 resources from Macie Classic for the specified member account.

" }, "ListMemberAccounts":{ "name":"ListMemberAccounts", @@ -82,7 +82,7 @@ {"shape":"InternalException"}, {"shape":"InvalidInputException"} ], - "documentation":"

Lists all Amazon Macie member accounts for the current Amazon Macie master account.

" + "documentation":"

Lists all Amazon Macie Classic member accounts for the current Amazon Macie Classic master account.

" }, "ListS3Resources":{ "name":"ListS3Resources", @@ -97,7 +97,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Lists all the S3 resources associated with Amazon Macie. If memberAccountId isn't specified, the action lists the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action lists the S3 resources associated with Amazon Macie for the specified member account.

" + "documentation":"

Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId isn't specified, the action lists the S3 resources associated with Amazon Macie Classic for the current master account. If memberAccountId is specified, the action lists the S3 resources associated with Amazon Macie Classic for the specified member account.

" }, "UpdateS3Resources":{ "name":"UpdateS3Resources", @@ -112,7 +112,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalException"} ], - "documentation":"

Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the current master account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Amazon Macie for the specified member account.

" + "documentation":"

Updates the classification types for the specified S3 resources. If memberAccountId isn't specified, the action updates the classification types of the S3 resources associated with Amazon Macie Classic for the current master account. If memberAccountId is specified, the action updates the classification types of the S3 resources associated with Amazon Macie Classic for the specified member account.

" } }, "shapes":{ @@ -135,7 +135,7 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The ID of the AWS account that you want to associate with Amazon Macie as a member account.

" + "documentation":"

The ID of the AWS account that you want to associate with Amazon Macie Classic as a member account.

" } } }, @@ -145,11 +145,11 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The ID of the Amazon Macie member account whose resources you want to associate with Macie.

" + "documentation":"

The ID of the Amazon Macie Classic member account whose resources you want to associate with Macie Classic.

" }, "s3Resources":{ "shape":"S3ResourcesClassification", - "documentation":"

The S3 resources that you want to associate with Amazon Macie for monitoring and data classification.

" + "documentation":"

The S3 resources that you want to associate with Amazon Macie Classic for monitoring and data classification.

" } } }, @@ -158,7 +158,7 @@ "members":{ "failedS3Resources":{ "shape":"FailedS3Resources", - "documentation":"

S3 resources that couldn't be associated with Amazon Macie. An error code and an error message are provided for each failed item.

" + "documentation":"

S3 resources that couldn't be associated with Amazon Macie Classic. An error code and an error message are provided for each failed item.

" } } }, @@ -179,10 +179,10 @@ }, "continuous":{ "shape":"S3ContinuousClassificationType", - "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.

" + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Amazon Macie Classic.

" } }, - "documentation":"

The classification type that Amazon Macie applies to the associated S3 resources.

" + "documentation":"

The classification type that Amazon Macie Classic applies to the associated S3 resources.

" }, "ClassificationTypeUpdate":{ "type":"structure", @@ -193,10 +193,10 @@ }, "continuous":{ "shape":"S3ContinuousClassificationType", - "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.

" + "documentation":"

A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie Classic begins performing continuous classification after a bucket is successfully associated with Amazon Macie Classic.

" } }, - "documentation":"

The classification type that Amazon Macie applies to the associated S3 resources. At least one of the classification types (oneTime or continuous) must be specified.

" + "documentation":"

The classification type that Amazon Macie Classic applies to the associated S3 resources. At least one of the classification types (oneTime or continuous) must be specified.

" }, "DisassociateMemberAccountRequest":{ "type":"structure", @@ -204,7 +204,7 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The ID of the member account that you want to remove from Amazon Macie.

" + "documentation":"

The ID of the member account that you want to remove from Amazon Macie Classic.

" } } }, @@ -214,11 +214,11 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The ID of the Amazon Macie member account whose resources you want to remove from being monitored by Amazon Macie.

" + "documentation":"

The ID of the Amazon Macie Classic member account whose resources you want to remove from being monitored by Amazon Macie Classic.

" }, "associatedS3Resources":{ "shape":"S3Resources", - "documentation":"

The S3 resources (buckets or prefixes) that you want to remove from being monitored and classified by Amazon Macie.

" + "documentation":"

The S3 resources (buckets or prefixes) that you want to remove from being monitored and classified by Amazon Macie Classic.

" } } }, @@ -227,7 +227,7 @@ "members":{ "failedS3Resources":{ "shape":"FailedS3Resources", - "documentation":"

S3 resources that couldn't be removed from being monitored and classified by Amazon Macie. An error code and an error message are provided for each failed item.

" + "documentation":"

S3 resources that couldn't be removed from being monitored and classified by Amazon Macie Classic. An error code and an error message are provided for each failed item.

" } } }, @@ -315,7 +315,7 @@ "members":{ "memberAccounts":{ "shape":"MemberAccounts", - "documentation":"

A list of the Amazon Macie member accounts returned by the action. The current master account is also included in this list.

" + "documentation":"

A list of the Amazon Macie Classic member accounts returned by the action. The current master account is also included in this list.

" }, "nextToken":{ "shape":"NextToken", @@ -328,7 +328,7 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The Amazon Macie member account ID whose associated S3 resources you want to list.

" + "documentation":"

The Amazon Macie Classic member account ID whose associated S3 resources you want to list.

" }, "nextToken":{ "shape":"NextToken", @@ -363,10 +363,10 @@ "members":{ "accountId":{ "shape":"AWSAccountId", - "documentation":"

The AWS account ID of the Amazon Macie member account.

" + "documentation":"

The AWS account ID of the Amazon Macie Classic member account.

" } }, - "documentation":"

Contains information about the Amazon Macie member account.

" + "documentation":"

Contains information about the Amazon Macie Classic member account.

" }, "MemberAccounts":{ "type":"list", @@ -420,18 +420,18 @@ "members":{ "bucketName":{ "shape":"BucketName", - "documentation":"

The name of the S3 bucket that you want to associate with Amazon Macie.

" + "documentation":"

The name of the S3 bucket that you want to associate with Amazon Macie Classic.

" }, "prefix":{ "shape":"Prefix", - "documentation":"

The prefix of the S3 bucket that you want to associate with Amazon Macie.

" + "documentation":"

The prefix of the S3 bucket that you want to associate with Amazon Macie Classic.

" }, "classificationType":{ "shape":"ClassificationType", - "documentation":"

The classification type that you want to specify for the resource associated with Amazon Macie.

" + "documentation":"

The classification type that you want to specify for the resource associated with Amazon Macie Classic.

" } }, - "documentation":"

The S3 resources that you want to associate with Amazon Macie for monitoring and data classification. This data type is used as a request parameter in the AssociateS3Resources action and a response parameter in the ListS3Resources action.

" + "documentation":"

The S3 resources that you want to associate with Amazon Macie Classic for monitoring and data classification. This data type is used as a request parameter in the AssociateS3Resources action and a response parameter in the ListS3Resources action.

" }, "S3ResourceClassificationUpdate":{ "type":"structure", @@ -450,7 +450,7 @@ }, "classificationTypeUpdate":{ "shape":"ClassificationTypeUpdate", - "documentation":"

The classification type that you want to update for the resource associated with Amazon Macie.

" + "documentation":"

The classification type that you want to update for the resource associated with Amazon Macie Classic.

" } }, "documentation":"

The S3 resources whose classification types you want to update. This data type is used as a request parameter in the UpdateS3Resources action.

" @@ -473,7 +473,7 @@ "members":{ "memberAccountId":{ "shape":"AWSAccountId", - "documentation":"

The AWS ID of the Amazon Macie member account whose S3 resources' classification types you want to update.

" + "documentation":"

The AWS ID of the Amazon Macie Classic member account whose S3 resources' classification types you want to update.

" }, "s3ResourcesUpdate":{ "shape":"S3ResourcesClassificationUpdate", @@ -491,5 +491,5 @@ } } }, - "documentation":"Amazon Macie

Amazon Macie is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Macie User Guide.

" + "documentation":"Amazon Macie Classic

Amazon Macie Classic is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Macie Classic recognizes sensitive data such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved. For more information, see the Amazon Macie Classic User Guide.

A new Amazon Macie is now available with significant design improvements and additional features, at a lower price and in most AWS Regions. We encourage you to explore and use the new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Amazon Macie, see Amazon Macie.

" } diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml new file mode 100644 index 000000000000..006b152ea7ad --- /dev/null +++ b/services/macie2/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + macie2 + AWS Java SDK :: Services :: Macie2 + The AWS Java SDK for Macie2 module holds the client classes that are used for + communicating with Macie2. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.macie2 + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/macie2/src/main/resources/codegen-resources/paginators-1.json b/services/macie2/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..f3b7195d8e13 --- /dev/null +++ b/services/macie2/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination" : { } +} \ No newline at end of file diff --git a/services/macie2/src/main/resources/codegen-resources/service-2.json b/services/macie2/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..7f478e10e0df --- /dev/null +++ b/services/macie2/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,6540 @@ +{ + "metadata": { + "apiVersion": "2020-01-01", + "endpointPrefix": "macie2", + "signingName": "macie2", + "serviceFullName": "Amazon Macie 2", + "serviceId": "Macie2", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "macie2-2020-01-01", + "signatureVersion": "v4" + }, + "operations": { + "AcceptInvitation": { + "name": "AcceptInvitation", + "http": { + "method": "POST", + "requestUri": "/invitations/accept", + "responseCode": 200 + }, + "input": { + "shape": "AcceptInvitationRequest" + }, + "output": { + "shape": "AcceptInvitationResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Accepts an Amazon Macie membership invitation that was received from a specific account.

" + }, + "BatchGetCustomDataIdentifiers": { + "name": "BatchGetCustomDataIdentifiers", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/get", + "responseCode": 200 + }, + "input": { + "shape": "BatchGetCustomDataIdentifiersRequest" + }, + "output": { + "shape": "BatchGetCustomDataIdentifiersResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about one or more custom data identifiers.

" + }, + "CreateClassificationJob": { + "name": "CreateClassificationJob", + "http": { + "method": "POST", + "requestUri": "/jobs", + "responseCode": 200 + }, + "input": { + "shape": "CreateClassificationJobRequest" + }, + "output": { + "shape": "CreateClassificationJobResponse", + "documentation": "

The request succeeded. The specified job was created.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Creates and defines the settings for a classification job.

" + }, + "CreateCustomDataIdentifier": { + "name": "CreateCustomDataIdentifier", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers", + "responseCode": 200 + }, + "input": { + "shape": "CreateCustomDataIdentifierRequest" + }, + "output": { + "shape": "CreateCustomDataIdentifierResponse", + "documentation": "

The request succeeded. The specified custom data identifier was created.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Creates and defines the criteria and other settings for a custom data identifier.

" + }, + "CreateFindingsFilter": { + "name": "CreateFindingsFilter", + "http": { + "method": "POST", + "requestUri": "/findingsfilters", + "responseCode": 200 + }, + "input": { + "shape": "CreateFindingsFilterRequest" + }, + "output": { + "shape": "CreateFindingsFilterResponse" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Creates and defines the criteria and other settings for a findings filter.

" + }, + "CreateInvitations": { + "name": "CreateInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations", + "responseCode": 200 + }, + "input": { + "shape": "CreateInvitationsRequest" + }, + "output": { + "shape": "CreateInvitationsResponse", + "documentation": "

The request succeeded. Processing might not be complete.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Sends an Amazon Macie membership invitation to one or more accounts.

" + }, + "CreateMember": { + "name": "CreateMember", + "http": { + "method": "POST", + "requestUri": "/members", + "responseCode": 200 + }, + "input": { + "shape": "CreateMemberRequest" + }, + "output": { + "shape": "CreateMemberResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Associates an account with an Amazon Macie master account.

" + }, + "CreateSampleFindings": { + "name": "CreateSampleFindings", + "http": { + "method": "POST", + "requestUri": "/findings/sample", + "responseCode": 200 + }, + "input": { + "shape": "CreateSampleFindingsRequest" + }, + "output": { + "shape": "CreateSampleFindingsResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Creates sample findings.

" + }, + "DeclineInvitations": { + "name": "DeclineInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations/decline", + "responseCode": 200 + }, + "input": { + "shape": "DeclineInvitationsRequest" + }, + "output": { + "shape": "DeclineInvitationsResponse", + "documentation": "

The request succeeded. Processing might not be complete.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Declines Amazon Macie membership invitations that were received from specific accounts.

" + }, + "DeleteCustomDataIdentifier": { + "name": "DeleteCustomDataIdentifier", + "http": { + "method": "DELETE", + "requestUri": "/custom-data-identifiers/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteCustomDataIdentifierRequest" + }, + "output": { + "shape": "DeleteCustomDataIdentifierResponse", + "documentation": "

The request succeeded. The specified custom data identifier was deleted and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Deletes a custom data identifier.

" + }, + "DeleteFindingsFilter": { + "name": "DeleteFindingsFilter", + "http": { + "method": "DELETE", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteFindingsFilterRequest" + }, + "output": { + "shape": "DeleteFindingsFilterResponse", + "documentation": "

The request succeeded. The specified findings filter was deleted and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Deletes a findings filter.

" + }, + "DeleteInvitations": { + "name": "DeleteInvitations", + "http": { + "method": "POST", + "requestUri": "/invitations/delete", + "responseCode": 200 + }, + "input": { + "shape": "DeleteInvitationsRequest" + }, + "output": { + "shape": "DeleteInvitationsResponse", + "documentation": "

The request succeeded. Processing might not be complete.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Deletes Amazon Macie membership invitations that were received from specific accounts.

" + }, + "DeleteMember": { + "name": "DeleteMember", + "http": { + "method": "DELETE", + "requestUri": "/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DeleteMemberRequest" + }, + "output": { + "shape": "DeleteMemberResponse", + "documentation": "

The request succeeded. The association was deleted and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Deletes the association between an Amazon Macie master account and an account.

" + }, + "DescribeBuckets": { + "name": "DescribeBuckets", + "http": { + "method": "POST", + "requestUri": "/datasources/s3", + "responseCode": 200 + }, + "input": { + "shape": "DescribeBucketsRequest" + }, + "output": { + "shape": "DescribeBucketsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves (queries) statistical data and other information about one or more S3 buckets that Amazon Macie monitors and analyzes.

" + }, + "DescribeClassificationJob": { + "name": "DescribeClassificationJob", + "http": { + "method": "GET", + "requestUri": "/jobs/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeClassificationJobRequest" + }, + "output": { + "shape": "DescribeClassificationJobResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the status and settings for a classification job.

" + }, + "DescribeOrganizationConfiguration": { + "name": "DescribeOrganizationConfiguration", + "http": { + "method": "GET", + "requestUri": "/admin/configuration", + "responseCode": 200 + }, + "input": { + "shape": "DescribeOrganizationConfigurationRequest" + }, + "output": { + "shape": "DescribeOrganizationConfigurationResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the Amazon Macie configuration settings for an AWS organization.

" + }, + "DisableMacie": { + "name": "DisableMacie", + "http": { + "method": "DELETE", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "DisableMacieRequest" + }, + "output": { + "shape": "DisableMacieResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Disables an Amazon Macie account and deletes Macie resources for the account.

" + }, + "DisableOrganizationAdminAccount": { + "name": "DisableOrganizationAdminAccount", + "http": { + "method": "DELETE", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "DisableOrganizationAdminAccountRequest" + }, + "output": { + "shape": "DisableOrganizationAdminAccountResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Disables an account as a delegated administrator of Amazon Macie for an AWS organization.

" + }, + "DisassociateFromMasterAccount": { + "name": "DisassociateFromMasterAccount", + "http": { + "method": "POST", + "requestUri": "/master/disassociate", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateFromMasterAccountRequest" + }, + "output": { + "shape": "DisassociateFromMasterAccountResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Disassociates a member account from its Amazon Macie master account.

" + }, + "DisassociateMember": { + "name": "DisassociateMember", + "http": { + "method": "POST", + "requestUri": "/members/disassociate/{id}", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateMemberRequest" + }, + "output": { + "shape": "DisassociateMemberResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Disassociates an Amazon Macie master account from a member account.

" + }, + "EnableMacie": { + "name": "EnableMacie", + "http": { + "method": "POST", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "EnableMacieRequest" + }, + "output": { + "shape": "EnableMacieResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Enables Amazon Macie and specifies the configuration settings for a Macie account.

" + }, + "EnableOrganizationAdminAccount": { + "name": "EnableOrganizationAdminAccount", + "http": { + "method": "POST", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "EnableOrganizationAdminAccountRequest" + }, + "output": { + "shape": "EnableOrganizationAdminAccountResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Enables an account as a delegated administrator of Amazon Macie for an AWS organization.

" + }, + "GetBucketStatistics": { + "name": "GetBucketStatistics", + "http": { + "method": "POST", + "requestUri": "/datasources/s3/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetBucketStatisticsRequest" + }, + "output": { + "shape": "GetBucketStatisticsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves (queries) aggregated statistical data for all the S3 buckets that Amazon Macie monitors and analyzes.

" + }, + "GetClassificationExportConfiguration": { + "name": "GetClassificationExportConfiguration", + "http": { + "method": "GET", + "requestUri": "/classification-export-configuration", + "responseCode": 200 + }, + "input": { + "shape": "GetClassificationExportConfigurationRequest" + }, + "output": { + "shape": "GetClassificationExportConfigurationResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves the configuration settings for storing data classification results.

" + }, + "GetCustomDataIdentifier": { + "name": "GetCustomDataIdentifier", + "http": { + "method": "GET", + "requestUri": "/custom-data-identifiers/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetCustomDataIdentifierRequest" + }, + "output": { + "shape": "GetCustomDataIdentifierResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the criteria and other settings for a custom data identifier.

" + }, + "GetFindingStatistics": { + "name": "GetFindingStatistics", + "http": { + "method": "POST", + "requestUri": "/findings/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingStatisticsRequest" + }, + "output": { + "shape": "GetFindingStatisticsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves (queries) aggregated statistical data about findings.

" + }, + "GetFindings": { + "name": "GetFindings", + "http": { + "method": "POST", + "requestUri": "/findings/describe", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingsRequest" + }, + "output": { + "shape": "GetFindingsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about one or more findings.

" + }, + "GetFindingsFilter": { + "name": "GetFindingsFilter", + "http": { + "method": "GET", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetFindingsFilterRequest" + }, + "output": { + "shape": "GetFindingsFilterResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the criteria and other settings for a findings filter.

" + }, + "GetInvitationsCount": { + "name": "GetInvitationsCount", + "http": { + "method": "GET", + "requestUri": "/invitations/count", + "responseCode": 200 + }, + "input": { + "shape": "GetInvitationsCountRequest" + }, + "output": { + "shape": "GetInvitationsCountResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves the count of Amazon Macie membership invitations that were received by an account.

" + }, + "GetMacieSession": { + "name": "GetMacieSession", + "http": { + "method": "GET", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "GetMacieSessionRequest" + }, + "output": { + "shape": "GetMacieSessionResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the current status and configuration settings for an Amazon Macie account.

" + }, + "GetMasterAccount": { + "name": "GetMasterAccount", + "http": { + "method": "GET", + "requestUri": "/master", + "responseCode": 200 + }, + "input": { + "shape": "GetMasterAccountRequest" + }, + "output": { + "shape": "GetMasterAccountResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the Amazon Macie master account for an account.

" + }, + "GetMember": { + "name": "GetMember", + "http": { + "method": "GET", + "requestUri": "/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "GetMemberRequest" + }, + "output": { + "shape": "GetMemberResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about a member account that's associated with an Amazon Macie master account.

" + }, + "GetUsageStatistics": { + "name": "GetUsageStatistics", + "http": { + "method": "POST", + "requestUri": "/usage/statistics", + "responseCode": 200 + }, + "input": { + "shape": "GetUsageStatisticsRequest" + }, + "output": { + "shape": "GetUsageStatisticsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves (queries) quotas and aggregated usage data for one or more accounts.

" + }, + "GetUsageTotals": { + "name": "GetUsageTotals", + "http": { + "method": "GET", + "requestUri": "/usage", + "responseCode": 200 + }, + "input": { + "shape": "GetUsageTotalsRequest" + }, + "output": { + "shape": "GetUsageTotalsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves (queries) aggregated usage data for an account.

" + }, + "ListClassificationJobs": { + "name": "ListClassificationJobs", + "http": { + "method": "POST", + "requestUri": "/jobs/list", + "responseCode": 200 + }, + "input": { + "shape": "ListClassificationJobsRequest" + }, + "output": { + "shape": "ListClassificationJobsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the status and settings for one or more classification jobs.

" + }, + "ListCustomDataIdentifiers": { + "name": "ListCustomDataIdentifiers", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/list", + "responseCode": 200 + }, + "input": { + "shape": "ListCustomDataIdentifiersRequest" + }, + "output": { + "shape": "ListCustomDataIdentifiersResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves a subset of information about all the custom data identifiers for an account.

" + }, + "ListFindings": { + "name": "ListFindings", + "http": { + "method": "POST", + "requestUri": "/findings", + "responseCode": 200 + }, + "input": { + "shape": "ListFindingsRequest" + }, + "output": { + "shape": "ListFindingsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves a subset of information about one or more findings.

" + }, + "ListFindingsFilters": { + "name": "ListFindingsFilters", + "http": { + "method": "GET", + "requestUri": "/findingsfilters", + "responseCode": 200 + }, + "input": { + "shape": "ListFindingsFiltersRequest" + }, + "output": { + "shape": "ListFindingsFiltersResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves a subset of information about all the findings filters for an account.

" + }, + "ListInvitations": { + "name": "ListInvitations", + "http": { + "method": "GET", + "requestUri": "/invitations", + "responseCode": 200 + }, + "input": { + "shape": "ListInvitationsRequest" + }, + "output": { + "shape": "ListInvitationsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about all the Amazon Macie membership invitations that were received by an account.

" + }, + "ListMembers": { + "name": "ListMembers", + "http": { + "method": "GET", + "requestUri": "/members", + "responseCode": 200 + }, + "input": { + "shape": "ListMembersRequest" + }, + "output": { + "shape": "ListMembersResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the accounts that are associated with an Amazon Macie master account.

" + }, + "ListOrganizationAdminAccounts": { + "name": "ListOrganizationAdminAccounts", + "http": { + "method": "GET", + "requestUri": "/admin", + "responseCode": 200 + }, + "input": { + "shape": "ListOrganizationAdminAccountsRequest" + }, + "output": { + "shape": "ListOrganizationAdminAccountsResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Retrieves information about the account that's designated as the delegated administrator of Amazon Macie for an AWS organization.

" + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [], + "documentation": "

Retrieves the tags (keys and values) that are associated with a classification job, custom data identifier, findings filter, or member account.

" + }, + "PutClassificationExportConfiguration": { + "name": "PutClassificationExportConfiguration", + "http": { + "method": "PUT", + "requestUri": "/classification-export-configuration", + "responseCode": 200 + }, + "input": { + "shape": "PutClassificationExportConfigurationRequest" + }, + "output": { + "shape": "PutClassificationExportConfigurationResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Creates or updates the configuration settings for storing data classification results.

" + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "output": { + "shape": "TagResourceResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [], + "documentation": "

Adds or updates one or more tags (keys and values) that are associated with a classification job, custom data identifier, findings filter, or member account.

" + }, + "TestCustomDataIdentifier": { + "name": "TestCustomDataIdentifier", + "http": { + "method": "POST", + "requestUri": "/custom-data-identifiers/test", + "responseCode": 200 + }, + "input": { + "shape": "TestCustomDataIdentifierRequest" + }, + "output": { + "shape": "TestCustomDataIdentifierResponse", + "documentation": "

The request succeeded.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Tests a custom data identifier.

" + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resourceArn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "output": { + "shape": "UntagResourceResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [], + "documentation": "

Removes one or more tags (keys and values) from a classification job, custom data identifier, findings filter, or member account.

" + }, + "UpdateClassificationJob": { + "name": "UpdateClassificationJob", + "http": { + "method": "PATCH", + "requestUri": "/jobs/{jobId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateClassificationJobRequest" + }, + "output": { + "shape": "UpdateClassificationJobResponse", + "documentation": "

The request succeeded. The job's status was changed and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Cancels a classification job.

" + }, + "UpdateFindingsFilter": { + "name": "UpdateFindingsFilter", + "http": { + "method": "PATCH", + "requestUri": "/findingsfilters/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateFindingsFilterRequest" + }, + "output": { + "shape": "UpdateFindingsFilterResponse", + "documentation": "

The request succeeded. The specified findings filter was updated.

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Updates the criteria and other settings for a findings filter.

" + }, + "UpdateMacieSession": { + "name": "UpdateMacieSession", + "http": { + "method": "PATCH", + "requestUri": "/macie", + "responseCode": 200 + }, + "input": { + "shape": "UpdateMacieSessionRequest" + }, + "output": { + "shape": "UpdateMacieSessionResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Suspends or re-enables an Amazon Macie account, or updates the configuration settings for a Macie account.

" + }, + "UpdateMemberSession": { + "name": "UpdateMemberSession", + "http": { + "method": "PATCH", + "requestUri": "/macie/members/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateMemberSessionRequest" + }, + "output": { + "shape": "UpdateMemberSessionResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Enables an Amazon Macie master account to suspend or re-enable a member account.

" + }, + "UpdateOrganizationConfiguration": { + "name": "UpdateOrganizationConfiguration", + "http": { + "method": "PATCH", + "requestUri": "/admin/configuration", + "responseCode": 200 + }, + "input": { + "shape": "UpdateOrganizationConfigurationRequest" + }, + "output": { + "shape": "UpdateOrganizationConfigurationResponse", + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "errors": [ + { + "shape": "ValidationException", + "documentation": "

The request failed because it contains a syntax error.

" + }, + { + "shape": "InternalServerException", + "documentation": "

The request failed due to an unknown internal server error, exception, or failure.

" + }, + { + "shape": "ServiceQuotaExceededException", + "documentation": "

The request failed because fulfilling the request would exceed one or more service quotas for your account.

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

The request was denied because you don't have sufficient access to the specified resource.

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

The request failed because the specified resource wasn't found.

" + }, + { + "shape": "ThrottlingException", + "documentation": "

The request failed because you sent too many requests during a certain amount of time.

" + }, + { + "shape": "ConflictException", + "documentation": "

The request failed because it conflicts with the current state of the specified resource.

" + } + ], + "documentation": "

Updates Amazon Macie configuration settings for an AWS organization.

" + } + }, + "shapes": { + "AcceptInvitationRequest": { + "type": "structure", + "members": { + "invitationId": { + "shape": "__string", + "locationName": "invitationId", + "documentation": "

The unique identifier for the invitation to accept.

" + }, + "masterAccount": { + "shape": "__string", + "locationName": "masterAccount", + "documentation": "

The AWS account ID for the account that sent the invitation.

" + } + }, + "required": [ + "masterAccount", + "invitationId" + ] + }, + "AcceptInvitationResponse": { + "type": "structure", + "members": {} + }, + "AccessControlList": { + "type": "structure", + "members": { + "allowsPublicReadAccess": { + "shape": "__boolean", + "locationName": "allowsPublicReadAccess", + "documentation": "

Specifies whether the ACL grants the general public with read access permissions for the bucket.

" + }, + "allowsPublicWriteAccess": { + "shape": "__boolean", + "locationName": "allowsPublicWriteAccess", + "documentation": "

Specifies whether the ACL grants the general public with write access permissions for the bucket.

" + } + }, + "documentation": "

Provides information about the permissions settings of the bucket-level access control list (ACL) for an S3 bucket.

" + }, + "AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred due to insufficient access to a specified resource.

", + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "AccountDetail": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account.

" + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

The email address for the account.

" + } + }, + "documentation": "

Specifies details for an account to associate with an Amazon Macie master account.

", + "required": [ + "email", + "accountId" + ] + }, + "AccountLevelPermissions": { + "type": "structure", + "members": { + "blockPublicAccess": { + "shape": "BlockPublicAccess", + "locationName": "blockPublicAccess", + "documentation": "

The block public access settings for the bucket.

" + } + }, + "documentation": "

Provides information about account-level permissions settings that apply to an S3 bucket.

" + }, + "AdminAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account.

" + }, + "status": { + "shape": "AdminStatus", + "locationName": "status", + "documentation": "

The current status of the account as a delegated administrator of Amazon Macie for the organization.

" + } + }, + "documentation": "

Provides information about an account that's designated as a delegated administrator of Amazon Macie for an AWS organization.

" + }, + "AdminStatus": { + "type": "string", + "documentation": "

The current status of an account as a delegated administrator of Amazon Macie for an AWS organization.

", + "enum": [ + "ENABLED", + "DISABLING_IN_PROGRESS" + ] + }, + "ApiCallDetails": { + "type": "structure", + "members": { + "api": { + "shape": "__string", + "locationName": "api", + "documentation": "

Reserved for future use.

" + }, + "apiServiceName": { + "shape": "__string", + "locationName": "apiServiceName", + "documentation": "

Reserved for future use.

" + }, + "firstSeen": { + "shape": "__timestampIso8601", + "locationName": "firstSeen", + "documentation": "

Reserved for future use.

" + }, + "lastSeen": { + "shape": "__timestampIso8601", + "locationName": "lastSeen", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "AssumedRole": { + "type": "structure", + "members": { + "accessKeyId": { + "shape": "__string", + "locationName": "accessKeyId", + "documentation": "

Reserved for future use.

" + }, + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

Reserved for future use.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Reserved for future use.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

Reserved for future use.

" + }, + "sessionContext": { + "shape": "SessionContext", + "locationName": "sessionContext", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "AwsAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

Reserved for future use.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "AwsService": { + "type": "structure", + "members": { + "invokedBy": { + "shape": "__string", + "locationName": "invokedBy", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "BatchGetCustomDataIdentifierSummary": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the custom data identifier.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

" + }, + "deleted": { + "shape": "__boolean", + "locationName": "deleted", + "documentation": "

Specifies whether the custom data identifier was deleted. If you delete a custom data identifier, Amazon Macie doesn't delete it permanently. Instead, it soft deletes the identifier.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The custom description of the custom data identifier.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the custom data identifier.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the custom data identifier.

" + } + }, + "documentation": "

Provides information about a custom data identifier.

" + }, + "BatchGetCustomDataIdentifiersRequest": { + "type": "structure", + "members": { + "ids": { + "shape": "__listOf__string", + "locationName": "ids", + "documentation": "

An array of strings that lists the unique identifiers for the custom data identifiers to retrieve information about.

" + } + } + }, + "BatchGetCustomDataIdentifiersResponse": { + "type": "structure", + "members": { + "customDataIdentifiers": { + "shape": "__listOfBatchGetCustomDataIdentifierSummary", + "locationName": "customDataIdentifiers", + "documentation": "

An array of objects, one for each custom data identifier that meets the criteria specified in the request.

" + }, + "notFoundIdentifierIds": { + "shape": "__listOf__string", + "locationName": "notFoundIdentifierIds", + "documentation": "

An array of identifiers, one for each identifier that was specified in the request, but doesn't correlate to an existing custom data identifier.

" + } + } + }, + "BlockPublicAccess": { + "type": "structure", + "members": { + "blockPublicAcls": { + "shape": "__boolean", + "locationName": "blockPublicAcls", + "documentation": "

Specifies whether Amazon S3 blocks public access control lists (ACLs) for the bucket and objects in the bucket.

" + }, + "blockPublicPolicy": { + "shape": "__boolean", + "locationName": "blockPublicPolicy", + "documentation": "

Specifies whether Amazon S3 blocks public bucket policies for the bucket.

" + }, + "ignorePublicAcls": { + "shape": "__boolean", + "locationName": "ignorePublicAcls", + "documentation": "

Specifies whether Amazon S3 ignores public ACLs for the bucket and objects in the bucket.

" + }, + "restrictPublicBuckets": { + "shape": "__boolean", + "locationName": "restrictPublicBuckets", + "documentation": "

Specifies whether Amazon S3 restricts public bucket policies for the bucket.

" + } + }, + "documentation": "

Provides information about the block public access settings for an S3 bucket. These settings can apply to a bucket at the account level or bucket level. For detailed information about each setting, see Using Amazon S3 block public access in the Amazon Simple Storage Service Developer Guide.

" + }, + "BucketCountByEffectivePermission": { + "type": "structure", + "members": { + "publiclyAccessible": { + "shape": "__long", + "locationName": "publiclyAccessible", + "documentation": "

Reserved for future use.

" + }, + "publiclyReadable": { + "shape": "__long", + "locationName": "publiclyReadable", + "documentation": "

Reserved for future use.

" + }, + "publiclyWritable": { + "shape": "__long", + "locationName": "publiclyWritable", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The total number of buckets that are publicly accessible, based on a combination of permissions settings for each bucket.

" + }, + "BucketCountByEncryptionType": { + "type": "structure", + "members": { + "kmsManaged": { + "shape": "__long", + "locationName": "kmsManaged", + "documentation": "

Reserved for future use.

" + }, + "s3Managed": { + "shape": "__long", + "locationName": "s3Managed", + "documentation": "

Reserved for future use.

" + }, + "unencrypted": { + "shape": "__long", + "locationName": "unencrypted", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The total number of buckets, grouped by server-side encryption type. This object also reports the total number of buckets that aren't encrypted.

" + }, + "BucketCountBySharedAccessType": { + "type": "structure", + "members": { + "external": { + "shape": "__long", + "locationName": "external", + "documentation": "

Reserved for future use.

" + }, + "internal": { + "shape": "__long", + "locationName": "internal", + "documentation": "

Reserved for future use.

" + }, + "notShared": { + "shape": "__long", + "locationName": "notShared", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The total number of buckets that are shared with another AWS account or configured to support cross-origin resource sharing (CORS).

" + }, + "BucketCriteria": { + "type": "map", + "documentation": "

Specifies, as a map, one or more attribute-based conditions that filter the results of a query for information about S3 buckets.

", + "key": { + "shape": "__string" + }, + "value": { + "shape": "BucketCriteriaAdditionalProperties" + } + }, + "BucketCriteriaAdditionalProperties": { + "type": "structure", + "members": { + "eq": { + "shape": "__listOf__string", + "locationName": "eq", + "documentation": "

An equal to condition to apply to a specified attribute value for buckets.

" + }, + "gt": { + "shape": "__long", + "locationName": "gt", + "documentation": "

A greater than condition to apply to a specified attribute value for buckets.

" + }, + "gte": { + "shape": "__long", + "locationName": "gte", + "documentation": "

A greater than or equal to condition to apply to a specified attribute value for buckets.

" + }, + "lt": { + "shape": "__long", + "locationName": "lt", + "documentation": "

A less than condition to apply to a specified attribute value for buckets.

" + }, + "lte": { + "shape": "__long", + "locationName": "lte", + "documentation": "

A less than or equal to condition to apply to a specified attribute value for buckets.

" + }, + "neq": { + "shape": "__listOf__string", + "locationName": "neq", + "documentation": "

A not equal to condition to apply to a specified attribute value for buckets.

" + }, + "prefix": { + "shape": "__string", + "locationName": "prefix", + "documentation": "

The prefix of the buckets to include in the results.

" + } + }, + "documentation": "

Specifies the operator to use in an attribute-based condition that filters the results of a query for information about S3 buckets.

" + }, + "BucketLevelPermissions": { + "type": "structure", + "members": { + "accessControlList": { + "shape": "AccessControlList", + "locationName": "accessControlList", + "documentation": "

The permissions settings of the access control list (ACL) for the bucket. This value is null if an ACL hasn't been defined for the bucket.

" + }, + "blockPublicAccess": { + "shape": "BlockPublicAccess", + "locationName": "blockPublicAccess", + "documentation": "

The block public access settings for the bucket.

" + }, + "bucketPolicy": { + "shape": "BucketPolicy", + "locationName": "bucketPolicy", + "documentation": "

The permissions settings of the bucket policy for the bucket. This value is null if a bucket policy hasn't been defined for the bucket.

" + } + }, + "documentation": "

Provides information about bucket-level permissions settings for an S3 bucket.

" + }, + "BucketMetadata": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The unique identifier for the AWS account that's associated with the bucket.

" + }, + "bucketArn": { + "shape": "__string", + "locationName": "bucketArn", + "documentation": "

The Amazon Resource Name (ARN) of the bucket.

" + }, + "bucketCreatedAt": { + "shape": "__timestampIso8601", + "locationName": "bucketCreatedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the bucket was created.

" + }, + "bucketName": { + "shape": "__string", + "locationName": "bucketName", + "documentation": "

The name of the bucket.

" + }, + "classifiableObjectCount": { + "shape": "__long", + "locationName": "classifiableObjectCount", + "documentation": "

The total number of objects that Amazon Macie can monitor and analyze in the bucket. These objects use a file format, file extension, or content type that Amazon Macie supports.

" + }, + "lastUpdated": { + "shape": "__timestampIso8601", + "locationName": "lastUpdated", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie last analyzed the bucket.

" + }, + "objectCount": { + "shape": "__long", + "locationName": "objectCount", + "documentation": "

The total number of objects in the bucket.

" + }, + "objectCountByEncryptionType": { + "shape": "ObjectCountByEncryptionType", + "locationName": "objectCountByEncryptionType", + "documentation": "

The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted.

" + }, + "publicAccess": { + "shape": "BucketPublicAccess", + "locationName": "publicAccess", + "documentation": "

Specifies whether the bucket is publicly accessible. If this value is true, an access control list (ACL), bucket policy, or block public access settings allow the bucket to be accessed by the general public.

" + }, + "region": { + "shape": "__string", + "locationName": "region", + "documentation": "

The AWS Region that hosts the bucket.

" + }, + "replicationDetails": { + "shape": "ReplicationDetails", + "locationName": "replicationDetails", + "documentation": "

Specifies whether the bucket is configured to replicate one or more objects to buckets for other AWS accounts and, if so, which accounts.

" + }, + "sharedAccess": { + "shape": "SharedAccess", + "locationName": "sharedAccess", + "documentation": "

Specifies whether the bucket is shared with another AWS account or configured to support cross-origin resource sharing (CORS). Valid values are:

  • EXTERNAL - The bucket is shared with an AWS account that isn\u2019t part of the Amazon Macie organization.

  • INTERNAL - The bucket is shared with an AWS account that's part of the Amazon Macie organization.

  • NOT_SHARED - The bucket isn't shared with other AWS accounts.

" + }, + "sizeInBytes": { + "shape": "__long", + "locationName": "sizeInBytes", + "documentation": "

The total storage size, in bytes, of the bucket.

" + }, + "sizeInBytesCompressed": { + "shape": "__long", + "locationName": "sizeInBytesCompressed", + "documentation": "

The total compressed storage size, in bytes, of the bucket.

" + }, + "tags": { + "shape": "__listOfKeyValuePair", + "locationName": "tags", + "documentation": "

An array that specifies the tags (keys and values) that are associated with the bucket.

" + }, + "versioning": { + "shape": "__boolean", + "locationName": "versioning", + "documentation": "

Specifies whether versioning is enabled for the bucket.

" + } + }, + "documentation": "

Provides information about an S3 bucket that Amazon Macie monitors and analyzes.

" + }, + "BucketPermissionConfiguration": { + "type": "structure", + "members": { + "accountLevelPermissions": { + "shape": "AccountLevelPermissions", + "locationName": "accountLevelPermissions", + "documentation": "

The account-level permissions settings that apply to the bucket.

" + }, + "bucketLevelPermissions": { + "shape": "BucketLevelPermissions", + "locationName": "bucketLevelPermissions", + "documentation": "

The bucket-level permissions settings for the bucket.

" + } + }, + "documentation": "

The account-level and bucket-level permissions settings for an S3 bucket, or the bucket that contains an object.

" + }, + "BucketPolicy": { + "type": "structure", + "members": { + "allowsPublicReadAccess": { + "shape": "__boolean", + "locationName": "allowsPublicReadAccess", + "documentation": "

Specifies whether the bucket policy allows the general public to have read access to the bucket.

" + }, + "allowsPublicWriteAccess": { + "shape": "__boolean", + "locationName": "allowsPublicWriteAccess", + "documentation": "

Specifies whether the bucket policy allows the general public to have write access to the bucket.

" + } + }, + "documentation": "

Provides information about the permissions settings of a bucket policy for an S3 bucket.

" + }, + "BucketPublicAccess": { + "type": "structure", + "members": { + "effectivePermission": { + "shape": "EffectivePermission", + "locationName": "effectivePermission", + "documentation": "

Specifies whether the bucket is publicly accessible due to the combination of permissions settings that apply to the bucket. Possible values are: PUBLIC, the bucket is publicly accessible; and, NOT_PUBLIC, the bucket isn't publicly accessible.

" + }, + "permissionConfiguration": { + "shape": "BucketPermissionConfiguration", + "locationName": "permissionConfiguration", + "documentation": "

The account-level and bucket-level permissions for the bucket.

" + } + }, + "documentation": "

Provides information about permissions settings that determine whether an S3 bucket is publicly accessible.

" + }, + "BucketSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "__string", + "locationName": "attributeName", + "documentation": "

The name of the attribute to sort the results by. This value can be the name of any property that Amazon Macie defines as bucket metadata, such as bucketName, accountId, or lastUpdated.

" + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + } + }, + "documentation": "

Specifies criteria for sorting the results of a query for information about S3 buckets.

" + }, + "ClassificationDetails": { + "type": "structure", + "members": { + "detailedResultsLocation": { + "shape": "__string", + "locationName": "detailedResultsLocation", + "documentation": "

The Amazon Resource Name (ARN) of the file that contains the detailed record, including offsets, for the finding.

" + }, + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

The Amazon Resource Name (ARN) of the classification job that produced the finding.

" + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The unique identifier for the classification job that produced the finding.

" + }, + "result": { + "shape": "ClassificationResult", + "locationName": "result", + "documentation": "

The status and detailed results of the finding.

" + } + }, + "documentation": "

Provides information about a sensitive data finding, including the classification job that produced the finding.

" + }, + "ClassificationExportConfiguration": { + "type": "structure", + "members": { + "s3Destination": { + "shape": "S3Destination", + "locationName": "s3Destination", + "documentation": "

The S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

" + } + }, + "documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. Currently, you can store classification results only in an S3 bucket.

" + }, + "ClassificationResult": { + "type": "structure", + "members": { + "customDataIdentifiers": { + "shape": "CustomDataIdentifiers", + "locationName": "customDataIdentifiers", + "documentation": "

The number of occurrences of the data that produced the finding, and the custom data identifiers that detected the data.

" + }, + "mimeType": { + "shape": "__string", + "locationName": "mimeType", + "documentation": "

The type of content, expressed as a MIME type, that the finding applies to. For example, application/gzip, for a GNU Gzip compressed archive file, or application/pdf, for an Adobe PDF file.

" + }, + "sensitiveData": { + "shape": "SensitiveData", + "locationName": "sensitiveData", + "documentation": "

The category and number of occurrences of the sensitive data that produced the finding.

" + }, + "sizeClassified": { + "shape": "__long", + "locationName": "sizeClassified", + "documentation": "

The total size, in bytes, of the data that the finding applies to.

" + }, + "status": { + "shape": "ClassificationResultStatus", + "locationName": "status", + "documentation": "

The status of the finding.

" + } + }, + "documentation": "

Provides detailed information about a sensitive data finding, including the types and number of occurrences of the data that was found.

" + }, + "ClassificationResultStatus": { + "type": "structure", + "members": { + "code": { + "shape": "__string", + "locationName": "code", + "documentation": "

The status of the finding, such as COMPLETE.

" + }, + "reason": { + "shape": "__string", + "locationName": "reason", + "documentation": "

A brief description of the status of the finding. Amazon Macie uses this value to notify you of any errors, warnings, or considerations that might impact your analysis of the finding.

" + } + }, + "documentation": "

Provides information about the status of a sensitive data finding.

" + }, + "ConflictException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred due to a versioning conflict for a specified resource.

", + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateClassificationJobRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + }, + "customDataIdentifierIds": { + "shape": "__listOf__string", + "locationName": "customDataIdentifierIds", + "documentation": "

The custom data identifiers to use for data analysis and classification.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

A custom description of the job. The description can contain as many as 512 characters.

" + }, + "initialRun": { + "shape": "__boolean", + "locationName": "initialRun", + "documentation": "

Specifies whether to run the job immediately, after it's created.

" + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

The schedule for running the job. Valid values are:

  • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

  • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to define the recurrence pattern for the job.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

A custom name for the job. The name must contain at least 3 characters and can contain as many as 64 characters.

" + }, + "s3JobDefinition": { + "shape": "S3JobDefinition", + "locationName": "s3JobDefinition", + "documentation": "

The S3 buckets that contain the objects to analyze, and the scope of that analysis.

" + }, + "samplingPercentage": { + "shape": "__integer", + "locationName": "samplingPercentage", + "documentation": "

The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If the value is less than 100, Amazon Macie randomly selects the objects to analyze, up to the specified percentage.

" + }, + "scheduleFrequency": { + "shape": "JobScheduleFrequency", + "locationName": "scheduleFrequency", + "documentation": "

The recurrence pattern for running the job. To run the job only once, don't specify a value for this property and set the value of the jobType property to ONE_TIME.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that specifies the tags to associate with the job.

A job can have a maximum of 50 tags. Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

" + } + }, + "required": [ + "s3JobDefinition", + "jobType", + "clientToken", + "name" + ] + }, + "CreateClassificationJobResponse": { + "type": "structure", + "members": { + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

The Amazon Resource Name (ARN) of the job.

" + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The unique identifier for the job.

" + } + } + }, + "CreateCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

A custom description of the custom data identifier. The description can contain up to 120 characters.

We strongly recommend that you avoid including any sensitive data in the description of a custom data identifier. Other users of your account might be able to see the identifier's description, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters.

" + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters.

" + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

A custom name for the custom data identifier. The name can contain as many as 120 characters.

We strongly recommend that you avoid including any sensitive data in the name of a custom data identifier. Other users of your account might be able to see the identifier's name, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

The regular expression (regex) that defines the pattern to match. The expression can contain as many as 500 characters.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that specifies the tags to associate with the custom data identifier.

A custom data identifier can have a maximum of 50 tags. Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

" + } + } + }, + "CreateCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "customDataIdentifierId": { + "shape": "__string", + "locationName": "customDataIdentifierId", + "documentation": "

The unique identifier for the custom data identifier that was created.

" + } + } + }, + "CreateFindingsFilterRequest": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" + }, + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

A custom description of the filter. The description can contain as many as 512 characters.

We strongly recommend that you avoid including any sensitive data in the description of a filter. Other users of your account might be able to see the filter's description, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

The criteria to use to filter findings.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters.

We strongly recommend that you avoid including any sensitive data in the name of a filter. Other users of your account might be able to see the filter's name, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that specifies the tags to associate with the filter.

A findings filter can have a maximum of 50 tags. Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

" + } + }, + "required": [ + "action", + "findingCriteria", + "name" + ] + }, + "CreateFindingsFilterResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the filter that was created.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the filter that was created.

" + } + } + }, + "CreateInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

An array that lists AWS account IDs, one for each account to send the invitation to.

" + }, + "disableEmailNotification": { + "shape": "__boolean", + "locationName": "disableEmailNotification", + "documentation": "

Specifies whether to send an email notification to the root user of each account that the invitation will be sent to. This notification is in addition to an alert that the root user receives in AWS Personal Health Dashboard. To send an email notification to the root user of each account, set this value to true.

" + }, + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

A custom message to include in the invitation. Amazon Macie adds this message to the standard content that it sends for an invitation.

" + } + }, + "required": [ + "accountIds" + ] + }, + "CreateInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

An array of objects, one for each account whose invitation hasn't been processed. Each object identifies the account and explains why the invitation hasn't been processed for the account.

" + } + } + }, + "CreateMemberRequest": { + "type": "structure", + "members": { + "account": { + "shape": "AccountDetail", + "locationName": "account", + "documentation": "

The details for the account to associate with the master account.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that specifies the tags to associate with the account in Amazon Macie.

An account can have a maximum of 50 tags. Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

" + } + }, + "required": [ + "account" + ] + }, + "CreateMemberResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the account that was associated with the master account.

" + } + } + }, + "CreateSampleFindingsRequest": { + "type": "structure", + "members": { + "findingTypes": { + "shape": "__listOfFindingType", + "locationName": "findingTypes", + "documentation": "

An array that lists one or more types of findings to include in the set of sample findings. Currently, the only supported value is Policy:IAMUser/S3BucketEncryptionDisabled.

" + } + } + }, + "CreateSampleFindingsResponse": { + "type": "structure", + "members": {} + }, + "Criterion": { + "type": "map", + "documentation": "

Specifies a condition that defines a property, operator, and value to use to filter the results of a query for findings.

", + "key": { + "shape": "__string" + }, + "value": { + "shape": "CriterionAdditionalProperties" + } + }, + "CriterionAdditionalProperties": { + "type": "structure", + "members": { + "eq": { + "shape": "__listOf__string", + "locationName": "eq", + "documentation": "

An equal to condition to apply to a specified property value for findings.

" + }, + "gt": { + "shape": "__long", + "locationName": "gt", + "documentation": "

A greater than condition to apply to a specified property value for findings.

" + }, + "gte": { + "shape": "__long", + "locationName": "gte", + "documentation": "

A greater than or equal to condition to apply to a specified property value for findings.

" + }, + "lt": { + "shape": "__long", + "locationName": "lt", + "documentation": "

A less than condition to apply to a specified property value for findings.

" + }, + "lte": { + "shape": "__long", + "locationName": "lte", + "documentation": "

A less than or equal to condition to apply to a specified property value for findings.

" + }, + "neq": { + "shape": "__listOf__string", + "locationName": "neq", + "documentation": "

A not equal to condition to apply to a specified property value for findings.

" + } + }, + "documentation": "

Specifies the operator to use in a property-based condition that filters the results of a query for findings.

" + }, + "Currency": { + "type": "string", + "documentation": "

The type of currency that data for a usage metric is reported in. Possible values are:

", + "enum": [ + "USD" + ] + }, + "CustomDataIdentifierSummary": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the custom data identifier.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The custom description of the custom data identifier.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the custom data identifier.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the custom data identifier.

" + } + }, + "documentation": "

Provides information about a custom data identifier.

" + }, + "CustomDataIdentifiers": { + "type": "structure", + "members": { + "detections": { + "shape": "CustomDetections", + "locationName": "detections", + "documentation": "

The names of the custom data identifiers that detected the data, and the number of occurrences of the data that each identifier detected.

" + }, + "totalCount": { + "shape": "__long", + "locationName": "totalCount", + "documentation": "

The total number of occurrences of the data that was detected by the custom data identifiers and produced the finding.

" + } + }, + "documentation": "

Provides information about the number of occurrences of the data that produced a sensitive data finding, and the custom data identifiers that detected the data for the finding.

" + }, + "CustomDetection": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the custom data identifier.

" + }, + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

The total number of occurrences of the data that the custom data identifier detected for the finding.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The name of the custom data identifier.

" + } + }, + "documentation": "

Provides information about a custom data identifier that produced a sensitive data finding, and the number of occurrences of the data that it detected for the finding.

" + }, + "CustomDetections": { + "type": "list", + "documentation": "

Reserved for future use.

", + "member": { + "shape": "CustomDetection" + } + }, + "DailySchedule": { + "type": "structure", + "members": {}, + "documentation": "

Run the job once a day, every day. If specified, this is an empty object.

" + }, + "DayOfWeek": { + "type": "string", + "enum": [ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, + "DeclineInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

An array that lists AWS account IDs, one for each account that sent an invitation to decline.

" + } + }, + "required": [ + "accountIds" + ] + }, + "DeclineInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

An array of objects, one for each account whose invitation hasn't been declined. Each object identifies the account and explains why the request hasn't been processed for that account.

" + } + } + }, + "DefaultDetection": { + "type": "structure", + "members": { + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

The total number of occurrences of the type of data that was detected.

" + }, + "type": { + "shape": "__string", + "locationName": "type", + "documentation": "

The type of data that was detected. For example, AWS_CREDENTIALS, PHONE_NUMBER, or ADDRESS.

" + } + }, + "documentation": "

Provides information about sensitive data that was detected by managed data identifiers and produced a finding.

" + }, + "DefaultDetections": { + "type": "list", + "documentation": "

Reserved for future use.

", + "member": { + "shape": "DefaultDetection" + } + }, + "DeleteCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "DeleteCustomDataIdentifierResponse": { + "type": "structure", + "members": {} + }, + "DeleteFindingsFilterRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "DeleteFindingsFilterResponse": { + "type": "structure", + "members": {} + }, + "DeleteInvitationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "shape": "__listOf__string", + "locationName": "accountIds", + "documentation": "

An array that lists AWS account IDs, one for each account that sent an invitation to delete.

" + } + }, + "required": [ + "accountIds" + ] + }, + "DeleteInvitationsResponse": { + "type": "structure", + "members": { + "unprocessedAccounts": { + "shape": "__listOfUnprocessedAccount", + "locationName": "unprocessedAccounts", + "documentation": "

An array of objects, one for each account whose invitation hasn't been deleted. Each object identifies the account and explains why the request hasn't been processed for that account.

" + } + } + }, + "DeleteMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "DeleteMemberResponse": { + "type": "structure", + "members": {} + }, + "DescribeBucketsRequest": { + "type": "structure", + "members": { + "criteria": { + "shape": "BucketCriteria", + "locationName": "criteria", + "documentation": "

The criteria to use to filter the query results.

" + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of the response. The default value is 50.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + }, + "sortCriteria": { + "shape": "BucketSortCriteria", + "locationName": "sortCriteria", + "documentation": "

The criteria to use to sort the query results.

" + } + } + }, + "DescribeBucketsResponse": { + "type": "structure", + "members": { + "buckets": { + "shape": "__listOfBucketMetadata", + "locationName": "buckets", + "documentation": "

An array of objects, one for each bucket that meets the filter criteria specified in the request.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "DescribeClassificationJobRequest": { + "type": "structure", + "members": { + "jobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

The unique identifier for the classification job.

" + } + }, + "required": [ + "jobId" + ] + }, + "DescribeClassificationJobResponse": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

The token that was provided to ensure the idempotency of the request to create the job.

", + "idempotencyToken": true + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job was created.

" + }, + "customDataIdentifierIds": { + "shape": "__listOf__string", + "locationName": "customDataIdentifierIds", + "documentation": "

The custom data identifiers that the job uses to analyze data.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The custom description of the job.

" + }, + "initialRun": { + "shape": "__boolean", + "locationName": "initialRun", + "documentation": "

Specifies whether the job has run for the first time.

" + }, + "jobArn": { + "shape": "__string", + "locationName": "jobArn", + "documentation": "

The Amazon Resource Name (ARN) of the job.

" + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The unique identifier for the job.

" + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - The job was cancelled by you or a user of the master account for your organization. A job might also be cancelled if ownership of an S3 bucket changed while the job was running, and that change affected the job's access to the bucket.

  • COMPLETE - Amazon Macie finished processing all the data specified for the job.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to jobs that occur only once.

  • PAUSED - Amazon Macie started the job, but completion of the job would exceed one or more quotas for your account.

  • RUNNING - The job is in progress.

" + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job ran or will run only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis. The scheduleFrequency property indicates the recurrence pattern for the job.

" + }, + "lastRunTime": { + "shape": "__timestampIso8601", + "locationName": "lastRunTime", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job last ran.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the job.

" + }, + "s3JobDefinition": { + "shape": "S3JobDefinition", + "locationName": "s3JobDefinition", + "documentation": "

The S3 buckets that the job is configured to analyze, and the scope of that analysis.

" + }, + "samplingPercentage": { + "shape": "__integer", + "locationName": "samplingPercentage", + "documentation": "

The sampling depth, as a percentage, that the job applies when it processes objects.

" + }, + "scheduleFrequency": { + "shape": "JobScheduleFrequency", + "locationName": "scheduleFrequency", + "documentation": "

The recurrence pattern for running the job. If the job is configured to run every day, this value is an empty dailySchedule object. If the job is configured to run only once, this value is null.

" + }, + "statistics": { + "shape": "Statistics", + "locationName": "statistics", + "documentation": "

The number of times that the job has run and processing statistics for the job's most recent run.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the classification job.

" + } + } + }, + "DescribeOrganizationConfigurationRequest": { + "type": "structure", + "members": {} + }, + "DescribeOrganizationConfigurationResponse": { + "type": "structure", + "members": { + "autoEnable": { + "shape": "__boolean", + "locationName": "autoEnable", + "documentation": "

Specifies whether Amazon Macie is enabled automatically for accounts that are added to the AWS organization.

" + }, + "maxAccountLimitReached": { + "shape": "__boolean", + "locationName": "maxAccountLimitReached", + "documentation": "

Specifies whether the maximum number of Amazon Macie member accounts are already associated with the AWS organization.

" + } + } + }, + "DisableMacieRequest": { + "type": "structure", + "members": {} + }, + "DisableMacieResponse": { + "type": "structure", + "members": {} + }, + "DisableOrganizationAdminAccountRequest": { + "type": "structure", + "members": { + "adminAccountId": { + "shape": "__string", + "location": "querystring", + "locationName": "adminAccountId", + "documentation": "

The AWS account ID of the delegated administrator account.

" + } + }, + "required": [ + "adminAccountId" + ] + }, + "DisableOrganizationAdminAccountResponse": { + "type": "structure", + "members": {} + }, + "DisassociateFromMasterAccountRequest": { + "type": "structure", + "members": {} + }, + "DisassociateFromMasterAccountResponse": { + "type": "structure", + "members": {} + }, + "DisassociateMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "DisassociateMemberResponse": { + "type": "structure", + "members": {} + }, + "DomainDetails": { + "type": "structure", + "members": { + "domainName": { + "shape": "__string", + "locationName": "domainName", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The DNS name of the entity that performed the action on the affected resource.

" + }, + "EffectivePermission": { + "type": "string", + "enum": [ + "PUBLIC", + "NOT_PUBLIC" + ] + }, + "Empty": { + "type": "structure", + "members": {}, + "documentation": "

The request succeeded and there isn't any content to include in the body of the response (No Content).

" + }, + "EnableMacieRequest": { + "type": "structure", + "members": { + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + }, + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

Specifies the status for the account. To enable Amazon Macie and start all Amazon Macie activities for the account, set this value to ENABLED.

" + } + } + }, + "EnableMacieResponse": { + "type": "structure", + "members": {} + }, + "EnableOrganizationAdminAccountRequest": { + "type": "structure", + "members": { + "adminAccountId": { + "shape": "__string", + "locationName": "adminAccountId", + "documentation": "

The AWS account ID for the account.

" + }, + "clientToken": { + "shape": "__string", + "locationName": "clientToken", + "documentation": "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", + "idempotencyToken": true + } + }, + "required": [ + "adminAccountId" + ] + }, + "EnableOrganizationAdminAccountResponse": { + "type": "structure", + "members": {} + }, + "EncryptionType": { + "type": "string", + "documentation": "

The server-side encryption algorithm that's used when storing the S3 bucket or object. Valid values are:

", + "enum": [ + "NONE", + "AES256", + "aws:kms", + "UNKNOWN" + ] + }, + "ErrorCode": { + "type": "string", + "documentation": "

The source of an error, issue, or delay. Possible values are:

", + "enum": [ + "ClientError", + "InternalError" + ] + }, + "FederatedUser": { + "type": "structure", + "members": { + "accessKeyId": { + "shape": "__string", + "locationName": "accessKeyId", + "documentation": "

Reserved for future use.

" + }, + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

Reserved for future use.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Reserved for future use.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

Reserved for future use.

" + }, + "sessionContext": { + "shape": "SessionContext", + "locationName": "sessionContext", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "Finding": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The identifier for the AWS account that the finding applies to. This is typically the account that owns the affected resource.

" + }, + "archived": { + "shape": "__boolean", + "locationName": "archived", + "documentation": "

Specifies whether the finding is archived.

" + }, + "category": { + "shape": "FindingCategory", + "locationName": "category", + "documentation": "

The category of the finding. Possible values are: CLASSIFICATION, for a sensitive data finding; and, POLICY, for a policy finding.

" + }, + "classificationDetails": { + "shape": "ClassificationDetails", + "locationName": "classificationDetails", + "documentation": "

The details of a sensitive data finding. This value is null for a policy finding.

" + }, + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

The total number of occurrences of this finding.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the finding was created.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The description of the finding.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the finding. This is a random string that Amazon Macie generates and assigns to a finding when it creates the finding.

" + }, + "partition": { + "shape": "__string", + "locationName": "partition", + "documentation": "

The AWS partition that Amazon Macie created the finding in.

" + }, + "policyDetails": { + "shape": "PolicyDetails", + "locationName": "policyDetails", + "documentation": "

The details of a policy finding. This value is null for a sensitive data finding.

" + }, + "region": { + "shape": "__string", + "locationName": "region", + "documentation": "

The AWS Region that Amazon Macie created the finding in.

" + }, + "resourcesAffected": { + "shape": "ResourcesAffected", + "locationName": "resourcesAffected", + "documentation": "

The resources that the finding applies to.

" + }, + "sample": { + "shape": "__boolean", + "locationName": "sample", + "documentation": "

Specifies whether the finding is a sample finding. A sample finding is a finding that uses example data to demonstrate what a finding might contain.

" + }, + "schemaVersion": { + "shape": "__string", + "locationName": "schemaVersion", + "documentation": "

The version of the schema that was used to define the data structures in the finding.

" + }, + "severity": { + "shape": "Severity", + "locationName": "severity", + "documentation": "

The severity of the finding.

" + }, + "title": { + "shape": "__string", + "locationName": "title", + "documentation": "

The brief description of the finding.

" + }, + "type": { + "shape": "FindingType", + "locationName": "type", + "documentation": "

The type of the finding.

" + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the finding was last updated. For sensitive data findings, this value is the same as the value for the createdAt property. Sensitive data findings aren't updated.

" + } + }, + "documentation": "

Provides information about a finding.

" + }, + "FindingAction": { + "type": "structure", + "members": { + "actionType": { + "shape": "FindingActionType", + "locationName": "actionType", + "documentation": "

The type of action that occurred for the affected resource. This value is typically AWS_API_CALL, which indicates that an entity invoked an API operation for the resource.

" + }, + "apiCallDetails": { + "shape": "ApiCallDetails", + "locationName": "apiCallDetails", + "documentation": "

For the affected resource:

  • The name of the operation that was invoked most recently and produced the finding (api).

  • The first date and time when any operation was invoked and produced the finding (firstSeen).

  • The most recent date and time when the specified operation was invoked and produced the finding (lastSeen).

All date and time values are in UTC and extended ISO 8601 format.

" + } + }, + "documentation": "

Provides information about an action that occurred for a resource and produced a policy finding.

" + }, + "FindingActionType": { + "type": "string", + "documentation": "

The type of action that occurred for the resource and produced the policy finding.

", + "enum": [ + "AWS_API_CALL" + ] + }, + "FindingActor": { + "type": "structure", + "members": { + "domainDetails": { + "shape": "DomainDetails", + "locationName": "domainDetails", + "documentation": "

The DNS name of the entity that performed the action on the affected resource.

" + }, + "ipAddressDetails": { + "shape": "IpAddressDetails", + "locationName": "ipAddressDetails", + "documentation": "

The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographical location for the IP address.

" + }, + "userIdentity": { + "shape": "UserIdentity", + "locationName": "userIdentity", + "documentation": "

The name and type of entity that performed the action on the affected resource.

" + } + }, + "documentation": "

Provides information about an entity that performed an action that produced a policy finding for a resource.

" + }, + "FindingCategory": { + "type": "string", + "documentation": "

The category of the finding. Valid values are:

", + "enum": [ + "CLASSIFICATION", + "POLICY" + ] + }, + "FindingCriteria": { + "type": "structure", + "members": { + "criterion": { + "shape": "Criterion", + "locationName": "criterion", + "documentation": "

A condition that specifies the property, operator, and value to use to filter the results.

" + } + }, + "documentation": "

Specifies, as a map, one or more property-based conditions that filter the results of a query for findings.

" + }, + "FindingPublishingFrequency": { + "type": "string", + "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for an account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events). Valid values are:

", + "enum": [ + "FIFTEEN_MINUTES", + "ONE_HOUR", + "SIX_HOURS" + ] + }, + "FindingStatisticsSortAttributeName": { + "type": "string", + "documentation": "

The grouping to sort the results by. Valid values are:

", + "enum": [ + "groupKey", + "count" + ] + }, + "FindingStatisticsSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "FindingStatisticsSortAttributeName", + "locationName": "attributeName", + "documentation": "

The grouping to sort the results by. Valid values are: count, sort the results by the number of findings in each group of results; and, groupKey, sort the results by the name of each group of results.

" + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + } + }, + "documentation": "

Specifies criteria for sorting the results of a query for information about findings.

" + }, + "FindingType": { + "type": "string", + "documentation": "

The type of finding. Valid values are:

", + "enum": [ + "SensitiveData:S3Object/Multiple", + "SensitiveData:S3Object/Financial", + "SensitiveData:S3Object/Personal", + "SensitiveData:S3Object/Credentials", + "SensitiveData:S3Object/CustomIdentifier", + "Policy:IAMUser/S3BucketPublic", + "Policy:IAMUser/S3BucketSharedExternally", + "Policy:IAMUser/S3BucketReplicatedExternally", + "Policy:IAMUser/S3BucketEncryptionDisabled", + "Policy:IAMUser/S3BlockPublicAccessDisabled" + ] + }, + "FindingsFilterAction": { + "type": "string", + "documentation": "

The action to perform on findings that meet the filter criteria. To suppress (automatically archive) findings that meet the criteria, set this value to ARCHIVE. Valid values are:

", + "enum": [ + "ARCHIVE", + "NOOP" + ] + }, + "FindingsFilterListItem": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the filter.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the filter.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the filter.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the filter.

" + } + }, + "documentation": "

Provides information about a findings filter.

" + }, + "GetBucketStatisticsRequest": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The unique identifier for the AWS account.

" + } + } + }, + "GetBucketStatisticsResponse": { + "type": "structure", + "members": { + "bucketCount": { + "shape": "__long", + "locationName": "bucketCount", + "documentation": "

The total number of buckets.

" + }, + "bucketCountByEffectivePermission": { + "shape": "BucketCountByEffectivePermission", + "locationName": "bucketCountByEffectivePermission", + "documentation": "

The total number of buckets that are publicly accessible, based on a combination of permissions settings for each bucket.

" + }, + "bucketCountByEncryptionType": { + "shape": "BucketCountByEncryptionType", + "locationName": "bucketCountByEncryptionType", + "documentation": "

The total number of buckets, grouped by server-side encryption type. This object also reports the total number of buckets that aren't encrypted.

" + }, + "bucketCountBySharedAccessType": { + "shape": "BucketCountBySharedAccessType", + "locationName": "bucketCountBySharedAccessType", + "documentation": "

The total number of buckets that are shared with another AWS account or configured to support cross-origin resource sharing (CORS).

" + }, + "classifiableObjectCount": { + "shape": "__long", + "locationName": "classifiableObjectCount", + "documentation": "

The total number of objects that Amazon Macie can monitor and analyze in all the buckets. These objects use a file format, file extension, or content type that Amazon Macie supports.

" + }, + "lastUpdated": { + "shape": "__timestampIso8601", + "locationName": "lastUpdated", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie last analyzed the buckets.

" + }, + "objectCount": { + "shape": "__long", + "locationName": "objectCount", + "documentation": "

The total number of objects in all the buckets.

" + }, + "sizeInBytes": { + "shape": "__long", + "locationName": "sizeInBytes", + "documentation": "

The total storage size, in bytes, of all the buckets.

" + }, + "sizeInBytesCompressed": { + "shape": "__long", + "locationName": "sizeInBytesCompressed", + "documentation": "

The total compressed storage size, in bytes, of all the buckets.

" + } + } + }, + "GetClassificationExportConfigurationRequest": { + "type": "structure", + "members": {} + }, + "GetClassificationExportConfigurationResponse": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

The location where data classification results are stored, and the encryption settings that are used when storing results in that location.

" + } + } + }, + "GetCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "GetCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the custom data identifier.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the custom data identifier was created.

" + }, + "deleted": { + "shape": "__boolean", + "locationName": "deleted", + "documentation": "

Specifies whether the custom data identifier was deleted. If you delete a custom data identifier, Amazon Macie doesn't delete it permanently. Instead, it soft deletes the identifier.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The custom description of the custom data identifier.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the custom data identifier.

" + }, + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it.

" + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match.

" + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the custom data identifier.

" + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

The regular expression (regex) that defines the pattern to match.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the custom data identifier.

" + } + } + }, + "GetFindingStatisticsRequest": { + "type": "structure", + "members": { + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

The criteria to use to filter the query results.

" + }, + "groupBy": { + "shape": "GroupBy", + "locationName": "groupBy", + "documentation": "

The finding property to use to group the query results. Valid values are:

  • classificationDetails.jobId - The unique identifier for the classification job that produced the finding.

  • resourcesAffected.s3Bucket.name - The name of the S3 bucket that the finding applies to.

  • severity.description - The severity of the finding, such as High or Medium.

  • type - The type of finding, such as Policy:IAMUser/S3BucketPublic and SensitiveData:S3Object/Personal.

" + }, + "size": { + "shape": "__integer", + "locationName": "size", + "documentation": "

The maximum number of items to include in each page of the response.

" + }, + "sortCriteria": { + "shape": "FindingStatisticsSortCriteria", + "locationName": "sortCriteria", + "documentation": "

The criteria to use to sort the query results.

" + } + }, + "required": [ + "groupBy" + ] + }, + "GetFindingStatisticsResponse": { + "type": "structure", + "members": { + "countsByGroup": { + "shape": "__listOfGroupCount", + "locationName": "countsByGroup", + "documentation": "

An array of objects, one for each group of findings that meet the filter criteria specified in the request.

" + } + } + }, + "GetFindingsFilterRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "GetFindingsFilterResponse": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

The action that's performed on findings that meet the filter criteria (findingCriteria). Possible values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the filter.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

The custom description of the filter.

" + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

The criteria that's used to filter findings.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the filter.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the filter.

" + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the filter.

" + } + } + }, + "GetFindingsRequest": { + "type": "structure", + "members": { + "findingIds": { + "shape": "__listOf__string", + "locationName": "findingIds", + "documentation": "

An array of strings that lists the unique identifiers for the findings to retrieve information about.

" + }, + "sortCriteria": { + "shape": "SortCriteria", + "locationName": "sortCriteria", + "documentation": "

The criteria for sorting the results of the request.

" + } + }, + "required": [ + "findingIds" + ] + }, + "GetFindingsResponse": { + "type": "structure", + "members": { + "findings": { + "shape": "__listOfFinding", + "locationName": "findings", + "documentation": "

An array of objects, one for each finding that meets the criteria specified in the request.

" + } + } + }, + "GetInvitationsCountRequest": { + "type": "structure", + "members": {} + }, + "GetInvitationsCountResponse": { + "type": "structure", + "members": { + "invitationsCount": { + "shape": "__long", + "locationName": "invitationsCount", + "documentation": "

The total number of invitations that were received by the account, not including the currently accepted invitation.

" + } + } + }, + "GetMacieSessionRequest": { + "type": "structure", + "members": {} + }, + "GetMacieSessionResponse": { + "type": "structure", + "members": { + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the Amazon Macie account was created.

" + }, + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "

The frequency with which Amazon Macie publishes updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events).

" + }, + "serviceRole": { + "shape": "__string", + "locationName": "serviceRole", + "documentation": "

The Amazon Resource Name (ARN) of the service-level role that allows Amazon Macie to monitor and analyze data in AWS resources for the account.

" + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

The current status of the Amazon Macie account. Possible values are: PAUSED, the account is enabled but all Amazon Macie activities are suspended (paused) for the account; and, ENABLED, the account is enabled and all Amazon Macie activities are enabled for the account.

" + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the Amazon Macie account.

" + } + } + }, + "GetMasterAccountRequest": { + "type": "structure", + "members": {} + }, + "GetMasterAccountResponse": { + "type": "structure", + "members": { + "master": { + "shape": "Invitation", + "locationName": "master", + "documentation": "

The AWS account ID for the master account. If the accounts are associated by a Macie membership invitation, this object also provides details about the invitation that was sent and accepted to establish the relationship between the accounts.

" + } + } + }, + "GetMemberRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + } + }, + "required": [ + "id" + ] + }, + "GetMemberResponse": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the account.

" + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

The email address for the account.

" + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.

" + }, + "masterAccountId": { + "shape": "__string", + "locationName": "masterAccountId", + "documentation": "

The AWS account ID for the master account.

" + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

The current status of the relationship between the account and the master account.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the member account in Amazon Macie.

" + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

" + } + } + }, + "GetUsageStatisticsRequest": { + "type": "structure", + "members": { + "filterBy": { + "shape": "__listOfUsageStatisticsFilter", + "locationName": "filterBy", + "documentation": "

The criteria to use to filter the query results.

" + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of the response.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + }, + "sortBy": { + "shape": "UsageStatisticsSortBy", + "locationName": "sortBy", + "documentation": "

The criteria to use to sort the query results.

" + } + } + }, + "GetUsageStatisticsResponse": { + "type": "structure", + "members": { + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + }, + "records": { + "shape": "__listOfUsageRecord", + "locationName": "records", + "documentation": "

An array of objects that contains the results of the query. Each object contains the data for an account that meets the filter criteria specified in the request.

" + } + } + }, + "GetUsageTotalsRequest": { + "type": "structure", + "members": {} + }, + "GetUsageTotalsResponse": { + "type": "structure", + "members": { + "usageTotals": { + "shape": "__listOfUsageTotal", + "locationName": "usageTotals", + "documentation": "

An array of objects that contains the results of the query. Each object contains the data for a specific usage metric.

" + } + } + }, + "GroupBy": { + "type": "string", + "enum": [ + "resourcesAffected.s3Bucket.name", + "type", + "classificationDetails.jobId", + "severity.description" + ] + }, + "GroupCount": { + "type": "structure", + "members": { + "count": { + "shape": "__long", + "locationName": "count", + "documentation": "

The total number of findings in the group of query results.

" + }, + "groupKey": { + "shape": "__string", + "locationName": "groupKey", + "documentation": "

The name of the property that defines the group in the query results, as specified by the groupBy property in the query request.

" + } + }, + "documentation": "

Provides a group of results for a query that retrieved information about findings.

" + }, + "IamUser": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

Reserved for future use.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Reserved for future use.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

Reserved for future use.

" + }, + "userName": { + "shape": "__string", + "locationName": "userName", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "InternalServerException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred due to an unknown internal server error, exception, or failure.

", + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "Invitation": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account that sent the invitation.

" + }, + "invitationId": { + "shape": "__string", + "locationName": "invitationId", + "documentation": "

The unique identifier for the invitation. Amazon Macie uses this identifier to validate the inviter account with the invitee account.

" + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the invitation was sent.

" + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

The status of the relationship between the account that sent the invitation (inviter account) and the account that received the invitation (invitee account).

" + } + }, + "documentation": "

Provides information about an Amazon Macie membership invitation that was received by an account.

" + }, + "IpAddressDetails": { + "type": "structure", + "members": { + "ipAddressV4": { + "shape": "__string", + "locationName": "ipAddressV4", + "documentation": "

Reserved for future use.

" + }, + "ipCity": { + "shape": "IpCity", + "locationName": "ipCity", + "documentation": "

Reserved for future use.

" + }, + "ipCountry": { + "shape": "IpCountry", + "locationName": "ipCountry", + "documentation": "

Reserved for future use.

" + }, + "ipGeoLocation": { + "shape": "IpGeoLocation", + "locationName": "ipGeoLocation", + "documentation": "

Reserved for future use.

" + }, + "ipOwner": { + "shape": "IpOwner", + "locationName": "ipOwner", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographical location for the IP address.

" + }, + "IpCity": { + "type": "structure", + "members": { + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "IpCountry": { + "type": "structure", + "members": { + "code": { + "shape": "__string", + "locationName": "code", + "documentation": "

Reserved for future use.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "IpGeoLocation": { + "type": "structure", + "members": { + "lat": { + "shape": "__double", + "locationName": "lat", + "documentation": "

Reserved for future use.

" + }, + "lon": { + "shape": "__double", + "locationName": "lon", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "IpOwner": { + "type": "structure", + "members": { + "asn": { + "shape": "__string", + "locationName": "asn", + "documentation": "

Reserved for future use.

" + }, + "asnOrg": { + "shape": "__string", + "locationName": "asnOrg", + "documentation": "

Reserved for future use.

" + }, + "isp": { + "shape": "__string", + "locationName": "isp", + "documentation": "

Reserved for future use.

" + }, + "org": { + "shape": "__string", + "locationName": "org", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "JobComparator": { + "type": "string", + "documentation": "

The operator to use in a condition. Valid values are:

", + "enum": [ + "EQ", + "GT", + "GTE", + "LT", + "LTE", + "NE", + "CONTAINS" + ] + }, + "JobScheduleFrequency": { + "type": "structure", + "members": { + "dailySchedule": { + "shape": "DailySchedule", + "locationName": "dailySchedule", + "documentation": "

Run the job once a day, every day. If specified, this is an empty object.

" + }, + "monthlySchedule": { + "shape": "MonthlySchedule", + "locationName": "monthlySchedule", + "documentation": "

Run the job once a month, on a specific day of the month. This value can be an integer from 1 through 30.

" + }, + "weeklySchedule": { + "shape": "WeeklySchedule", + "locationName": "weeklySchedule", + "documentation": "

Run the job once a week, on a specific day of the week. Valid values are: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, and SUNDAY.

" + } + }, + "documentation": "

Specifies the recurrence pattern for running a classification job.

" + }, + "JobScopeTerm": { + "type": "structure", + "members": { + "simpleScopeTerm": { + "shape": "SimpleScopeTerm", + "locationName": "simpleScopeTerm", + "documentation": "

A property-based condition that defines a property, operator, and one or more values for including or excluding an object from a job.

" + }, + "tagScopeTerm": { + "shape": "TagScopeTerm", + "locationName": "tagScopeTerm", + "documentation": "

A tag-based condition that defines a property, operator, and one or more values for including or excluding an object from a job.

" + } + }, + "documentation": "

Specifies one or more conditions that determine which objects a classification job analyzes.

" + }, + "JobScopingBlock": { + "type": "structure", + "members": { + "and": { + "shape": "__listOfJobScopeTerm", + "locationName": "and", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "JobStatus": { + "type": "string", + "documentation": "

The current status of a classification job. Valid values are:

", + "enum": [ + "RUNNING", + "PAUSED", + "CANCELLED", + "COMPLETE", + "IDLE" + ] + }, + "JobSummary": { + "type": "structure", + "members": { + "bucketDefinitions": { + "shape": "__listOfS3BucketDefinitionForJob", + "locationName": "bucketDefinitions", + "documentation": "

The S3 buckets that the job is configured to analyze.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the job was created.

" + }, + "jobId": { + "shape": "__string", + "locationName": "jobId", + "documentation": "

The unique identifier for the job.

" + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

The current status of the job. Possible values are:

  • CANCELLED - The job was cancelled by you or a user of the master account for your organization. A job might also be cancelled if ownership of an S3 bucket changed while the job was running, and that change affected the job's access to the bucket.

  • COMPLETE - Amazon Macie finished processing all the data specified for the job.

  • IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to jobs that occur only once.

  • PAUSED - Amazon Macie started the job, but completion of the job would exceed one or more quotas for your account.

  • RUNNING - The job is in progress.

" + }, + "jobType": { + "shape": "JobType", + "locationName": "jobType", + "documentation": "

The schedule for running the job. Possible values are:

  • ONE_TIME - The job ran or will run only once.

  • SCHEDULED - The job runs on a daily, weekly, or monthly basis.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The custom name of the job.

" + } + }, + "documentation": "

Provides information about a classification job, including the current status of the job.

" + }, + "JobType": { + "type": "string", + "documentation": "

The schedule for running a classification job. Valid values are:

", + "enum": [ + "ONE_TIME", + "SCHEDULED" + ] + }, + "KeyValuePair": { + "type": "structure", + "members": { + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

One part of a key-value pair that comprises a tag. A tag key is a general label that acts as a category for more specific tag values.

" + }, + "value": { + "shape": "__string", + "locationName": "value", + "documentation": "

One part of a key-value pair that comprises a tag. A tag value acts as a descriptor for a tag key. A tag value can be empty or null.

" + } + }, + "documentation": "

Provides information about the tags that are associated with an S3 bucket or object. Each tag consists of a required tag key and an associated tag value.

" + }, + "KeyValuePairList": { + "type": "list", + "documentation": "

Reserved for future use.

", + "member": { + "shape": "KeyValuePair" + } + }, + "ListClassificationJobsRequest": { + "type": "structure", + "members": { + "filterCriteria": { + "shape": "ListJobsFilterCriteria", + "locationName": "filterCriteria", + "documentation": "

The criteria to use to filter the results.

" + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of the response.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + }, + "sortCriteria": { + "shape": "ListJobsSortCriteria", + "locationName": "sortCriteria", + "documentation": "

The criteria to use to sort the results.

" + } + } + }, + "ListClassificationJobsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "__listOfJobSummary", + "locationName": "items", + "documentation": "

An array of objects, one for each job that meets the filter criteria specified in the request.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListCustomDataIdentifiersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of the response.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + } + } + }, + "ListCustomDataIdentifiersResponse": { + "type": "structure", + "members": { + "items": { + "shape": "__listOfCustomDataIdentifierSummary", + "locationName": "items", + "documentation": "

An array of objects, one for each custom data identifier.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListFindingsFiltersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of a paginated response.

" + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + } + } + }, + "ListFindingsFiltersResponse": { + "type": "structure", + "members": { + "findingsFilterListItems": { + "shape": "__listOfFindingsFilterListItem", + "locationName": "findingsFilterListItems", + "documentation": "

An array of objects, one for each filter that's associated with the account.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListFindingsRequest": { + "type": "structure", + "members": { + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

The criteria to use to filter the results.

" + }, + "maxResults": { + "shape": "__integer", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of the response.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + }, + "sortCriteria": { + "shape": "SortCriteria", + "locationName": "sortCriteria", + "documentation": "

The criteria to use to sort the results.

" + } + } + }, + "ListFindingsResponse": { + "type": "structure", + "members": { + "findingIds": { + "shape": "__listOf__string", + "locationName": "findingIds", + "documentation": "

An array of strings, where each string is the unique identifier for a finding that meets the filter criteria specified in the request.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListInvitationsRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of a paginated response.

" + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + } + } + }, + "ListInvitationsResponse": { + "type": "structure", + "members": { + "invitations": { + "shape": "__listOfInvitation", + "locationName": "invitations", + "documentation": "

An array of objects, one for each invitation that was received by the account.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListJobsFilterCriteria": { + "type": "structure", + "members": { + "excludes": { + "shape": "__listOfListJobsFilterTerm", + "locationName": "excludes", + "documentation": "

An array of objects, one for each condition that determines which jobs to exclude from the results.

" + }, + "includes": { + "shape": "__listOfListJobsFilterTerm", + "locationName": "includes", + "documentation": "

An array of objects, one for each condition that determines which jobs to include in the results.

" + } + }, + "documentation": "

Specifies criteria for filtering the results of a request for information about classification jobs.

" + }, + "ListJobsFilterKey": { + "type": "string", + "documentation": "

The property to use to filter the results. Valid values are:

", + "enum": [ + "jobType", + "jobStatus", + "createdAt", + "name" + ] + }, + "ListJobsFilterTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

The operator to use to filter the results.

" + }, + "key": { + "shape": "ListJobsFilterKey", + "locationName": "key", + "documentation": "

The property to use to filter the results.

" + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

An array that lists one or more values to use to filter the results.

" + } + }, + "documentation": "

Specifies a condition that filters the results of a request for information about classification jobs. Each condition consists of a property, an operator, and one or more values.

" + }, + "ListJobsSortAttributeName": { + "type": "string", + "documentation": "

The property to sort the results by. Valid values are:

", + "enum": [ + "createdAt", + "jobStatus", + "name", + "jobType" + ] + }, + "ListJobsSortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "ListJobsSortAttributeName", + "locationName": "attributeName", + "documentation": "

The property to sort the results by.

" + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + } + }, + "documentation": "

Specifies criteria for sorting the results of a request for information about classification jobs.

" + }, + "ListMembersRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of a paginated response.

" + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + }, + "onlyAssociated": { + "shape": "__string", + "location": "querystring", + "locationName": "onlyAssociated", + "documentation": "

Specifies which accounts to include in the response, based on the status of an account's relationship with the master account. By default, the response includes only current member accounts. To include all accounts, set the value for this parameter to false.

" + } + } + }, + "ListMembersResponse": { + "type": "structure", + "members": { + "members": { + "shape": "__listOfMember", + "locationName": "members", + "documentation": "

An array of objects, one for each account that's associated with the master account and meets the criteria specified by the onlyAssociated request parameter.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListOrganizationAdminAccountsRequest": { + "type": "structure", + "members": { + "maxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of items to include in each page of a paginated response.

" + }, + "nextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

" + } + } + }, + "ListOrganizationAdminAccountsResponse": { + "type": "structure", + "members": { + "adminAccounts": { + "shape": "__listOfAdminAccount", + "locationName": "adminAccounts", + "documentation": "

An array of objects, one for each account that's designated as a delegated administrator of Amazon Macie for the AWS organization. Of those accounts, only one can have a status of ENABLED.

" + }, + "nextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

" + } + }, + "required": [ + "resourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the resource.

" + } + } + }, + "MacieStatus": { + "type": "string", + "documentation": "

The status of an Amazon Macie account. Valid values are:

", + "enum": [ + "PAUSED", + "ENABLED" + ] + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 25 + }, + "Member": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the account.

" + }, + "email": { + "shape": "__string", + "locationName": "email", + "documentation": "

The email address for the account.

" + }, + "invitedAt": { + "shape": "__timestampIso8601", + "locationName": "invitedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when an Amazon Macie membership invitation was last sent to the account. This value is null if a Macie invitation hasn't been sent to the account.

" + }, + "masterAccountId": { + "shape": "__string", + "locationName": "masterAccountId", + "documentation": "

The AWS account ID for the master account.

" + }, + "relationshipStatus": { + "shape": "RelationshipStatus", + "locationName": "relationshipStatus", + "documentation": "

The current status of the relationship between the account and the master account.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that identifies the tags (keys and values) that are associated with the account in Amazon Macie.

" + }, + "updatedAt": { + "shape": "__timestampIso8601", + "locationName": "updatedAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, of the most recent change to the status of the relationship between the account and the master account.

" + } + }, + "documentation": "

Provides information about an account that's associated with an Amazon Macie master account.

" + }, + "MonthlySchedule": { + "type": "structure", + "members": { + "dayOfMonth": { + "shape": "__integer", + "locationName": "dayOfMonth", + "documentation": "

Run the job once a month, on a specific day of the month. This value can be an integer from 1 through 30.

" + } + }, + "documentation": "

Run the job once a month, on a specific day of the month. This value can be an integer from 1 through 30.

" + }, + "ObjectCountByEncryptionType": { + "type": "structure", + "members": { + "customerManaged": { + "shape": "__long", + "locationName": "customerManaged", + "documentation": "

Reserved for future use.

" + }, + "kmsManaged": { + "shape": "__long", + "locationName": "kmsManaged", + "documentation": "

Reserved for future use.

" + }, + "s3Managed": { + "shape": "__long", + "locationName": "s3Managed", + "documentation": "

Reserved for future use.

" + }, + "unencrypted": { + "shape": "__long", + "locationName": "unencrypted", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted.

" + }, + "OrderBy": { + "type": "string", + "enum": [ + "ASC", + "DESC" + ] + }, + "PolicyDetails": { + "type": "structure", + "members": { + "action": { + "shape": "FindingAction", + "locationName": "action", + "documentation": "

The action that occurred and produced the finding.

" + }, + "actor": { + "shape": "FindingActor", + "locationName": "actor", + "documentation": "

The entity that performed the action that produced the finding.

" + } + }, + "documentation": "

Provides detailed information about a policy finding.

" + }, + "PutClassificationExportConfigurationRequest": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

The location to store data classification results in, and the encryption settings to use when storing results in that location.

" + } + }, + "required": [ + "configuration" + ] + }, + "PutClassificationExportConfigurationResponse": { + "type": "structure", + "members": { + "configuration": { + "shape": "ClassificationExportConfiguration", + "locationName": "configuration", + "documentation": "

The location where the data classification results are stored, and the encryption settings that are used when storing results in that location.

" + } + } + }, + "RelationshipStatus": { + "type": "string", + "documentation": "

The current status of the relationship between an account and an associated Amazon Macie master account (inviter account). Possible values are:

", + "enum": [ + "Enabled", + "Paused", + "Invited", + "Created", + "Removed", + "Resigned", + "EmailVerificationInProgress", + "EmailVerificationFailed" + ] + }, + "ReplicationDetails": { + "type": "structure", + "members": { + "replicated": { + "shape": "__boolean", + "locationName": "replicated", + "documentation": "

Specifies whether the bucket is configured to replicate one or more objects to any destination.

" + }, + "replicatedExternally": { + "shape": "__boolean", + "locationName": "replicatedExternally", + "documentation": "

Specifies whether the bucket is configured to replicate one or more objects to an AWS account that isn't part of the Amazon Macie organization.

" + }, + "replicationAccounts": { + "shape": "__listOf__string", + "locationName": "replicationAccounts", + "documentation": "

An array of AWS account IDs, one for each AWS account that the bucket is configured to replicate one or more objects to.

" + } + }, + "documentation": "

Provides information about settings that define whether one or more objects in an S3 bucket are replicated to S3 buckets for other AWS accounts and, if so, which accounts.

" + }, + "ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred because a specified resource wasn't found.

", + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "ResourcesAffected": { + "type": "structure", + "members": { + "s3Bucket": { + "shape": "S3Bucket", + "locationName": "s3Bucket", + "documentation": "

The details of the S3 bucket that the finding applies to. This object provides a set of metadata about the affected S3 bucket.

" + }, + "s3Object": { + "shape": "S3Object", + "locationName": "s3Object", + "documentation": "

The details of the S3 object that the finding applies to. This object provides a set of metadata about the affected S3 object.

" + } + }, + "documentation": "

Provides information about the resources that a finding applies to.

" + }, + "S3Bucket": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the bucket.

" + }, + "createdAt": { + "shape": "__timestampIso8601", + "locationName": "createdAt", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the bucket was created.

" + }, + "defaultServerSideEncryption": { + "shape": "ServerSideEncryption", + "locationName": "defaultServerSideEncryption", + "documentation": "

The server-side encryption settings for the bucket.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

The name of the bucket.

" + }, + "owner": { + "shape": "S3BucketOwner", + "locationName": "owner", + "documentation": "

The display name and account identifier for the user who owns the bucket.

" + }, + "publicAccess": { + "shape": "BucketPublicAccess", + "locationName": "publicAccess", + "documentation": "

The permissions settings that determine whether the bucket is publicly accessible.

" + }, + "tags": { + "shape": "KeyValuePairList", + "locationName": "tags", + "documentation": "

The tags that are associated with the bucket.

" + } + }, + "documentation": "

Provides information about an S3 bucket that a finding applies to.

" + }, + "S3BucketDefinitionForJob": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The unique identifier for the AWS account that owns one or more of the buckets. If specified, the job analyzes objects in all the buckets that are owned by the account and meet other conditions specified for the job.

" + }, + "buckets": { + "shape": "__listOf__string", + "locationName": "buckets", + "documentation": "

An array that lists the names of the buckets.

" + } + }, + "documentation": "

Specifies which S3 buckets contain the objects that a classification job analyzes.

" + }, + "S3BucketOwner": { + "type": "structure", + "members": { + "displayName": { + "shape": "__string", + "locationName": "displayName", + "documentation": "

The display name of the user who owns the bucket.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The AWS account ID for the user who owns the bucket.

" + } + }, + "documentation": "

Provides information about the user who owns an S3 bucket.

" + }, + "S3Destination": { + "type": "structure", + "members": { + "bucketName": { + "shape": "__string", + "locationName": "bucketName", + "documentation": "

The name of the bucket.

" + }, + "keyPrefix": { + "shape": "__string", + "locationName": "keyPrefix", + "documentation": "

The path prefix to use in the path to the location in the bucket. This prefix specifies where to store classification results in the bucket.

" + }, + "kmsKeyArn": { + "shape": "__string", + "locationName": "kmsKeyArn", + "documentation": "

The Amazon Resource Name (ARN) of the AWS Key Management Service customer master key (CMK) to use for encryption of the results. This must be the ARN of an existing CMK that's in the same AWS Region as the bucket.

" + } + }, + "documentation": "

Specifies an S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

", + "required": [ + "bucketName", + "kmsKeyArn" + ] + }, + "S3JobDefinition": { + "type": "structure", + "members": { + "bucketDefinitions": { + "shape": "__listOfS3BucketDefinitionForJob", + "locationName": "bucketDefinitions", + "documentation": "

An array of objects, one for each bucket that contains objects to analyze.

" + }, + "scoping": { + "shape": "Scoping", + "locationName": "scoping", + "documentation": "

A Scoping object that specifies conditions for including or excluding objects from the job.

" + } + }, + "documentation": "

Specifies which S3 buckets contain the objects that a classification job analyzes, and the scope of that analysis.

" + }, + "S3Object": { + "type": "structure", + "members": { + "bucketArn": { + "shape": "__string", + "locationName": "bucketArn", + "documentation": "

The Amazon Resource Name (ARN) of the bucket that contains the object.

" + }, + "eTag": { + "shape": "__string", + "locationName": "eTag", + "documentation": "

The entity tag (ETag) that identifies the affected version of the object. If the object was overwritten or changed after Amazon Macie produced the finding, this value might be different from the current ETag for the object.

" + }, + "extension": { + "shape": "__string", + "locationName": "extension", + "documentation": "

The file extension of the object. If the object doesn't have a file extension, this value is \"\".

" + }, + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

The full key (name) that's assigned to the object.

" + }, + "lastModified": { + "shape": "__timestampIso8601", + "locationName": "lastModified", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the object was last modified.

" + }, + "path": { + "shape": "__string", + "locationName": "path", + "documentation": "

The path to the object, including the full key (name).

" + }, + "publicAccess": { + "shape": "__boolean", + "locationName": "publicAccess", + "documentation": "

Specifies whether the object is publicly accessible due to the combination of permissions settings that apply to the object.

" + }, + "serverSideEncryption": { + "shape": "ServerSideEncryption", + "locationName": "serverSideEncryption", + "documentation": "

The server-side encryption settings for the object.

" + }, + "size": { + "shape": "__long", + "locationName": "size", + "documentation": "

The total storage size, in bytes, of the object.

" + }, + "storageClass": { + "shape": "StorageClass", + "locationName": "storageClass", + "documentation": "

The storage class of the object.

" + }, + "tags": { + "shape": "KeyValuePairList", + "locationName": "tags", + "documentation": "

The tags that are associated with the object.

" + }, + "versionId": { + "shape": "__string", + "locationName": "versionId", + "documentation": "

The identifier for the affected version of the object.

" + } + }, + "documentation": "

Provides information about an S3 object that a finding applies to.

" + }, + "ScopeFilterKey": { + "type": "string", + "documentation": "

The property to use in a condition that determines which objects are analyzed by a classification job. Valid values are:

", + "enum": [ + "BUCKET_CREATION_DATE", + "OBJECT_EXTENSION", + "OBJECT_LAST_MODIFIED_DATE", + "OBJECT_SIZE", + "TAG" + ] + }, + "Scoping": { + "type": "structure", + "documentation": "

An object that specifies conditions for including or excluding objects from the job.

", + "members": { + "excludes": { + "shape": "JobScopingBlock", + "locationName": "excludes", + "documentation": "

Reserved for future use.

" + }, + "includes": { + "shape": "JobScopingBlock", + "locationName": "includes", + "documentation": "

Reserved for future use.

" + } + } + }, + "SensitiveData": { + "type": "list", + "documentation": "

Provides information about the category and number of occurrences of sensitive data that produced a finding.

", + "member": { + "shape": "SensitiveDataItem" + } + }, + "SensitiveDataItem": { + "type": "structure", + "members": { + "category": { + "shape": "SensitiveDataItemCategory", + "locationName": "category", + "documentation": "

The category of sensitive data that was detected. For example: FINANCIAL_INFORMATION, for financial information such as credit card numbers; PERSONAL_INFORMATION, for personally identifiable information such as full names and mailing addresses; or, CUSTOM_IDENTIFIER, for data that was detected by a custom data identifier.

" + }, + "detections": { + "shape": "DefaultDetections", + "locationName": "detections", + "documentation": "

An array of objects, one for each type of sensitive data that was detected. Each object reports the number of occurrences of a specific type of sensitive data that was detected.

" + }, + "totalCount": { + "shape": "__long", + "locationName": "totalCount", + "documentation": "

The total number of occurrences of the sensitive data that was detected.

" + } + }, + "documentation": "

Provides information about the category, type, and number of occurrences of sensitive data that produced a finding.

" + }, + "SensitiveDataItemCategory": { + "type": "string", + "documentation": "

The category of sensitive data that was detected and produced the finding.

", + "enum": [ + "FINANCIAL_INFORMATION", + "PERSONAL_INFORMATION", + "CREDENTIALS", + "CUSTOM_IDENTIFIER" + ] + }, + "ServerSideEncryption": { + "type": "structure", + "members": { + "encryptionType": { + "shape": "EncryptionType", + "locationName": "encryptionType", + "documentation": "

The server-side encryption algorithm that's used when storing data in the bucket or object. If encryption is disabled for the bucket or object, this value is NONE.

" + }, + "kmsMasterKeyId": { + "shape": "__string", + "locationName": "kmsMasterKeyId", + "documentation": "

The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) master key that's used to encrypt the bucket or object. This value is null if KMS isn't used to encrypt the bucket or object.

" + } + }, + "documentation": "

Provides information about the server-side encryption settings for an S3 bucket or object.

" + }, + "ServiceLimit": { + "type": "structure", + "members": { + "isServiceLimited": { + "shape": "__boolean", + "locationName": "isServiceLimited", + "documentation": "

Specifies whether the account has met the quota that corresponds to the metric specified by the UsageByAccount.type field in the response.

" + }, + "unit": { + "shape": "Unit", + "locationName": "unit", + "documentation": "

The unit of measurement for the value specified by the value field.

" + }, + "value": { + "shape": "__long", + "locationName": "value", + "documentation": "

The value for the metric specified by the UsageByAccount.type field in the response.

" + } + }, + "documentation": "

Specifies a current quota for an account.

" + }, + "ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred due to one or more service quotas for an account.

", + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "SessionContext": { + "type": "structure", + "members": { + "attributes": { + "shape": "SessionContextAttributes", + "locationName": "attributes", + "documentation": "

The date and time when the credentials were issued, and whether the credentials were authenticated with a multi-factor authentication (MFA) device.

" + }, + "sessionIssuer": { + "shape": "SessionIssuer", + "locationName": "sessionIssuer", + "documentation": "

The source and type of credentials that the entity obtained.

" + } + }, + "documentation": "

Provides information about a session that was created for an entity that performed an action by using temporary security credentials.

" + }, + "SessionContextAttributes": { + "type": "structure", + "members": { + "creationDate": { + "shape": "__timestampIso8601", + "locationName": "creationDate", + "documentation": "

The date and time, in ISO 8601 format, when the credentials were issued.

" + }, + "mfaAuthenticated": { + "shape": "__boolean", + "locationName": "mfaAuthenticated", + "documentation": "

Specifies whether the credentials were authenticated with a multi-factor authentication (MFA) device.

" + } + }, + "documentation": "

Provides information about the context in which temporary security credentials were issued to an entity.

" + }, + "SessionIssuer": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The account that owns the entity that was used to get the credentials.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the source account, IAM user, or role that was used to get the credentials.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

The internal identifier for the entity that was used to get the credentials.

" + }, + "type": { + "shape": "__string", + "locationName": "type", + "documentation": "

The source of the temporary security credentials, such as Root, IAMUser, or Role.

" + }, + "userName": { + "shape": "__string", + "locationName": "userName", + "documentation": "

The name or alias of the user or role that issued the session. This value is null if the credentials were obtained from a root account that doesn't have an alias.

" + } + }, + "documentation": "

Provides information about the source and type of temporary security credentials that were issued to an entity.

" + }, + "Severity": { + "type": "structure", + "members": { + "description": { + "shape": "SeverityDescription", + "locationName": "description", + "documentation": "

The textual representation of the severity value, such as Low or High.

" + }, + "score": { + "shape": "__long", + "locationName": "score", + "documentation": "

The numeric score for the severity value, ranging from 0 (least severe) to 4 (most severe).

" + } + }, + "documentation": "

Provides the numeric score and textual representation of a severity value.

" + }, + "SeverityDescription": { + "type": "string", + "documentation": "

The textual representation of the finding's severity. Valid values are:

", + "enum": [ + "Low", + "Medium", + "High" + ] + }, + "SharedAccess": { + "type": "string", + "enum": [ + "EXTERNAL", + "INTERNAL", + "NOT_SHARED" + ] + }, + "SimpleScopeTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

The operator to use in the condition.

" + }, + "key": { + "shape": "ScopeFilterKey", + "locationName": "key", + "documentation": "

The property to use in the condition.

" + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

An array that lists one or more values to use in the condition.

" + } + }, + "documentation": "

Specifies a property-based condition that determines whether an object is included or excluded from a classification job.

" + }, + "SortCriteria": { + "type": "structure", + "members": { + "attributeName": { + "shape": "__string", + "locationName": "attributeName", + "documentation": "

The name of the property to sort the results by. This value can be the name of any property that Amazon Macie defines for a finding.

" + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

The sort order to apply to the results, based on the value for the property specified by the attributeName property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + } + }, + "documentation": "

Specifies criteria for sorting the results of a request for information about findings.

" + }, + "Statistics": { + "type": "structure", + "members": { + "approximateNumberOfObjectsToProcess": { + "shape": "__double", + "locationName": "approximateNumberOfObjectsToProcess", + "documentation": "

The approximate number of objects that the job has yet to process during its current run.

" + }, + "numberOfRuns": { + "shape": "__double", + "locationName": "numberOfRuns", + "documentation": "

The number of times that the job has run.

" + } + }, + "documentation": "

Provides processing statistics for a classification job.

" + }, + "StorageClass": { + "type": "string", + "documentation": "

The storage class of the S3 bucket or object. Valid values are:

", + "enum": [ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "ONEZONE_IA", + "GLACIER" + ] + }, + "TagMap": { + "type": "map", + "documentation": "

A string-to-string map of key-value pairs that specifies the tags (keys and values) for a classification job, custom data identifier, findings filter, or member account.

", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

" + }, + "tags": { + "shape": "TagMap", + "locationName": "tags", + "documentation": "

A map of key-value pairs that specifies the tags to associate with the resource.

A resource can have a maximum of 50 tags. Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

" + } + }, + "required": [ + "resourceArn", + "tags" + ] + }, + "TagResourceResponse": { + "type": "structure", + "members": {} + }, + "TagScopeTerm": { + "type": "structure", + "members": { + "comparator": { + "shape": "JobComparator", + "locationName": "comparator", + "documentation": "

The operator to use in the condition.

" + }, + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

The tag key to use in the condition.

" + }, + "tagValues": { + "shape": "__listOfTagValuePair", + "locationName": "tagValues", + "documentation": "

The tag key and value pairs to use in the condition.

" + }, + "target": { + "shape": "TagTarget", + "locationName": "target", + "documentation": "

The type of object to apply the condition to.

" + } + }, + "documentation": "

Specifies a tag-based condition that determines whether an object is included or excluded from a classification job.

" + }, + "TagTarget": { + "type": "string", + "documentation": "

The type of object to apply a tag-based condition to. Valid values are:

", + "enum": [ + "S3_OBJECT" + ] + }, + "TagValuePair": { + "type": "structure", + "members": { + "key": { + "shape": "__string", + "locationName": "key", + "documentation": "

The value for the tag key to use in the condition.

" + }, + "value": { + "shape": "__string", + "locationName": "value", + "documentation": "

The tag value, associated with the specified tag key, to use in the condition.

" + } + }, + "documentation": "

Specifies a tag key and value, as a pair, to use in a tag-based condition for a classification job.

" + }, + "TestCustomDataIdentifierRequest": { + "type": "structure", + "members": { + "ignoreWords": { + "shape": "__listOf__string", + "locationName": "ignoreWords", + "documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters.

" + }, + "keywords": { + "shape": "__listOf__string", + "locationName": "keywords", + "documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters.

" + }, + "maximumMatchDistance": { + "shape": "__integer", + "locationName": "maximumMatchDistance", + "documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1 - 300 characters. The default value is 50.

" + }, + "regex": { + "shape": "__string", + "locationName": "regex", + "documentation": "

The regular expression (regex) that defines the pattern to match. The expression can contain as many as 500 characters.

" + }, + "sampleText": { + "shape": "__string", + "locationName": "sampleText", + "documentation": "

The sample text to inspect by using the custom data identifier. The text can contain as many as 1,000 characters.

" + } + }, + "required": [ + "regex", + "sampleText" + ] + }, + "TestCustomDataIdentifierResponse": { + "type": "structure", + "members": { + "matchCount": { + "shape": "__integer", + "locationName": "matchCount", + "documentation": "

The number of instances of sample text that matched the detection criteria specified in the custom data identifier.

" + } + } + }, + "ThrottlingException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred because too many requests were sent during a certain amount of time.

", + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Unit": { + "type": "string", + "enum": [ + "TERABYTES" + ] + }, + "UnprocessedAccount": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account that the request applies to.

" + }, + "errorCode": { + "shape": "ErrorCode", + "locationName": "errorCode", + "documentation": "

The source of the issue or delay in processing the request.

" + }, + "errorMessage": { + "shape": "__string", + "locationName": "errorMessage", + "documentation": "

The reason why the request hasn't been processed.

" + } + }, + "documentation": "

Provides information about an account-related request that hasn't been processed.

" + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resourceArn", + "documentation": "

The Amazon Resource Name (ARN) of the classification job, custom data identifier, findings filter, or member account.

" + }, + "tagKeys": { + "shape": "__listOf__string", + "location": "querystring", + "locationName": "tagKeys", + "documentation": "

The key of the tag to remove from the resource. To remove multiple tags, append the tagKeys parameter and argument for each additional tag to remove, separated by an ampersand (&).

" + } + }, + "required": [ + "tagKeys", + "resourceArn" + ] + }, + "UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "UpdateClassificationJobRequest": { + "type": "structure", + "members": { + "jobId": { + "shape": "__string", + "location": "uri", + "locationName": "jobId", + "documentation": "

The unique identifier for the classification job.

" + }, + "jobStatus": { + "shape": "JobStatus", + "locationName": "jobStatus", + "documentation": "

The status to change the job's status to. The only supported value is CANCELLED, which cancels the job completely.

" + } + }, + "required": [ + "jobId", + "jobStatus" + ] + }, + "UpdateClassificationJobResponse": { + "type": "structure", + "members": {} + }, + "UpdateFindingsFilterRequest": { + "type": "structure", + "members": { + "action": { + "shape": "FindingsFilterAction", + "locationName": "action", + "documentation": "

The action to perform on findings that meet the filter criteria (findingCriteria). Valid values are: ARCHIVE, suppress (automatically archive) the findings; and, NOOP, don't perform any action on the findings.

" + }, + "description": { + "shape": "__string", + "locationName": "description", + "documentation": "

A custom description of the filter. The description can contain as many as 512 characters.

We strongly recommend that you avoid including any sensitive data in the description of a filter. Other users might be able to see the filter's description, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "findingCriteria": { + "shape": "FindingCriteria", + "locationName": "findingCriteria", + "documentation": "

The criteria to use to filter findings.

" + }, + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + }, + "name": { + "shape": "__string", + "locationName": "name", + "documentation": "

A custom name for the filter. The name must contain at least 3 characters and can contain as many as 64 characters.

We strongly recommend that you avoid including any sensitive data in the name of a filter. Other users might be able to see the filter's name, depending on the actions that they're allowed to perform in Amazon Macie.

" + }, + "position": { + "shape": "__integer", + "locationName": "position", + "documentation": "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

" + } + }, + "required": [ + "id" + ] + }, + "UpdateFindingsFilterResponse": { + "type": "structure", + "members": { + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

The Amazon Resource Name (ARN) of the filter that was updated.

" + }, + "id": { + "shape": "__string", + "locationName": "id", + "documentation": "

The unique identifier for the filter that was updated.

" + } + } + }, + "UpdateMacieSessionRequest": { + "type": "structure", + "members": { + "findingPublishingFrequency": { + "shape": "FindingPublishingFrequency", + "locationName": "findingPublishingFrequency", + "documentation": "Specifies how often to publish updates to policy findings for the account. This includes publishing updates to AWS Security Hub and Amazon EventBridge (formerly called Amazon CloudWatch Events)." + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

Specifies whether to change the status of the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

" + } + } + }, + "UpdateMacieSessionResponse": { + "type": "structure", + "members": {} + }, + "UpdateMemberSessionRequest": { + "type": "structure", + "members": { + "id": { + "shape": "__string", + "location": "uri", + "locationName": "id", + "documentation": "

The unique identifier for the Amazon Macie resource or account that the request applies to.

" + }, + "status": { + "shape": "MacieStatus", + "locationName": "status", + "documentation": "

Specifies the new status for the account. Valid values are: ENABLED, resume all Amazon Macie activities for the account; and, PAUSED, suspend all Macie activities for the account.

" + } + }, + "required": [ + "id", + "status" + ] + }, + "UpdateMemberSessionResponse": { + "type": "structure", + "members": {} + }, + "UpdateOrganizationConfigurationRequest": { + "type": "structure", + "members": { + "autoEnable": { + "shape": "__boolean", + "locationName": "autoEnable", + "documentation": "

Specifies whether Amazon Macie is enabled automatically for each account, when the account is added to the AWS organization.

" + } + }, + "required": [ + "autoEnable" + ] + }, + "UpdateOrganizationConfigurationResponse": { + "type": "structure", + "members": {} + }, + "UsageByAccount": { + "type": "structure", + "members": { + "currency": { + "shape": "Currency", + "locationName": "currency", + "documentation": "

The type of currency that the value for the metric (estimatedCost) is reported in.

" + }, + "estimatedCost": { + "shape": "__string", + "locationName": "estimatedCost", + "documentation": "

The estimated value for the metric.

" + }, + "serviceLimit": { + "shape": "ServiceLimit", + "locationName": "serviceLimit", + "documentation": "

The current value for the quota that corresponds to the metric specified by the type field.

" + }, + "type": { + "shape": "UsageType", + "locationName": "type", + "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

" + } + }, + "documentation": "

Provides data for a specific usage metric and the corresponding quota for an account. The value for the metric is an aggregated value that reports usage during the past 30 days.

" + }, + "UsageRecord": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

The AWS account ID for the account that the data applies to.

" + }, + "freeTrialStartDate": { + "shape": "__timestampIso8601", + "locationName": "freeTrialStartDate", + "documentation": "

The date and time, in UTC and extended ISO 8601 format, when the free trial period started for the account. This value is null if the account didn't participate in the free trial.

" + }, + "usage": { + "shape": "__listOfUsageByAccount", + "locationName": "usage", + "documentation": "

An array of objects that contains usage data and quotas for the account. Each object contains the data for a specific usage metric and the corresponding quota.

" + } + }, + "documentation": "

Provides quota and aggregated usage data for an account.

" + }, + "UsageStatisticsFilter": { + "type": "structure", + "members": { + "key": { + "shape": "UsageStatisticsFilterKey", + "locationName": "key", + "documentation": "

The field to use to filter the results. The only supported value is accountId.

" + }, + "values": { + "shape": "__listOf__string", + "locationName": "values", + "documentation": "

An array that lists the AWS account ID for each account to include in the results.

" + } + }, + "documentation": "

Specifies criteria for filtering the results of a query for account quotas and usage data.

" + }, + "UsageStatisticsFilterKey": { + "type": "string", + "documentation": "

The field to use to filter the results of a query for account quotas and usage data.

", + "enum": [ + "accountId" + ] + }, + "UsageStatisticsSortBy": { + "type": "structure", + "members": { + "key": { + "shape": "UsageStatisticsSortKey", + "locationName": "key", + "documentation": "

The field to sort the results by.

" + }, + "orderBy": { + "shape": "OrderBy", + "locationName": "orderBy", + "documentation": "

The sort order to apply to the results, based on the value for the field specified by the key property. Valid values are: ASC, sort the results in ascending order; and, DESC, sort the results in descending order.

" + } + }, + "documentation": "

Specifies criteria for sorting the results of a query for account quotas and usage data.

" + }, + "UsageStatisticsSortKey": { + "type": "string", + "documentation": "

The field to use to sort the results of a query for account quotas and usage data.

", + "enum": [ + "accountId", + "total" + ] + }, + "UsageTotal": { + "type": "structure", + "members": { + "currency": { + "shape": "Currency", + "locationName": "currency", + "documentation": "

The type of currency that the value for the metric (estimatedCost) is reported in.

" + }, + "estimatedCost": { + "shape": "__string", + "locationName": "estimatedCost", + "documentation": "

The estimated value for the metric.

" + }, + "type": { + "shape": "UsageType", + "locationName": "type", + "documentation": "

The name of the metric. Possible values are: DATA_INVENTORY_EVALUATION, for monitoring S3 buckets; and, SENSITIVE_DATA_DISCOVERY, for analyzing sensitive data.

" + } + }, + "documentation": "

Provides aggregated data for a usage metric. The value for the metric reports usage data for an account during the past 30 days.

" + }, + "UsageType": { + "type": "string", + "documentation": "

The name of a usage metric for an account. Possible values are:

", + "enum": [ + "DATA_INVENTORY_EVALUATION", + "SENSITIVE_DATA_DISCOVERY" + ] + }, + "UserIdentity": { + "type": "structure", + "members": { + "assumedRole": { + "shape": "AssumedRole", + "locationName": "assumedRole" + }, + "awsAccount": { + "shape": "AwsAccount", + "locationName": "awsAccount" + }, + "awsService": { + "shape": "AwsService", + "locationName": "awsService" + }, + "federatedUser": { + "shape": "FederatedUser", + "locationName": "federatedUser" + }, + "iamUser": { + "shape": "IamUser", + "locationName": "iamUser" + }, + "root": { + "shape": "UserIdentityRoot", + "locationName": "root" + }, + "type": { + "shape": "UserIdentityType", + "locationName": "type" + } + }, + "documentation": "

The name and type of entity who performed the action on the affected resource.

" + }, + "UserIdentityRoot": { + "type": "structure", + "members": { + "accountId": { + "shape": "__string", + "locationName": "accountId", + "documentation": "

Reserved for future use.

" + }, + "arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "

Reserved for future use.

" + }, + "principalId": { + "shape": "__string", + "locationName": "principalId", + "documentation": "

Reserved for future use.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "UserIdentityType": { + "type": "string", + "documentation": "

Reserved for future use.

", + "enum": [ + "AssumedRole", + "IAMUser", + "FederatedUser", + "Root", + "AWSAccount", + "AWSService" + ] + }, + "ValidationException": { + "type": "structure", + "members": { + "message": { + "shape": "__string", + "locationName": "message", + "documentation": "

The explanation of the error that occurred.

" + } + }, + "documentation": "

Provides information about an error that occurred due to a syntax error in a request.

", + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "WeeklySchedule": { + "type": "structure", + "members": { + "dayOfWeek": { + "shape": "DayOfWeek", + "locationName": "dayOfWeek", + "documentation": "

Run the job once a week, on a specific day of the week. Valid values are: MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, and SUNDAY.

" + } + }, + "documentation": "

Reserved for future use.

" + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__integer": { + "type": "integer" + }, + "__listOfAdminAccount": { + "type": "list", + "member": { + "shape": "AdminAccount" + } + }, + "__listOfBatchGetCustomDataIdentifierSummary": { + "type": "list", + "member": { + "shape": "BatchGetCustomDataIdentifierSummary" + } + }, + "__listOfBucketMetadata": { + "type": "list", + "member": { + "shape": "BucketMetadata" + } + }, + "__listOfCustomDataIdentifierSummary": { + "type": "list", + "member": { + "shape": "CustomDataIdentifierSummary" + } + }, + "__listOfFinding": { + "type": "list", + "member": { + "shape": "Finding" + } + }, + "__listOfFindingType": { + "type": "list", + "member": { + "shape": "FindingType" + } + }, + "__listOfFindingsFilterListItem": { + "type": "list", + "member": { + "shape": "FindingsFilterListItem" + } + }, + "__listOfGroupCount": { + "type": "list", + "member": { + "shape": "GroupCount" + } + }, + "__listOfInvitation": { + "type": "list", + "member": { + "shape": "Invitation" + } + }, + "__listOfJobScopeTerm": { + "type": "list", + "member": { + "shape": "JobScopeTerm" + } + }, + "__listOfJobSummary": { + "type": "list", + "member": { + "shape": "JobSummary" + } + }, + "__listOfKeyValuePair": { + "type": "list", + "member": { + "shape": "KeyValuePair" + } + }, + "__listOfListJobsFilterTerm": { + "type": "list", + "member": { + "shape": "ListJobsFilterTerm" + } + }, + "__listOfMember": { + "type": "list", + "member": { + "shape": "Member" + } + }, + "__listOfS3BucketDefinitionForJob": { + "type": "list", + "member": { + "shape": "S3BucketDefinitionForJob" + } + }, + "__listOfTagValuePair": { + "type": "list", + "member": { + "shape": "TagValuePair" + } + }, + "__listOfUnprocessedAccount": { + "type": "list", + "member": { + "shape": "UnprocessedAccount" + } + }, + "__listOfUsageByAccount": { + "type": "list", + "member": { + "shape": "UsageByAccount" + } + }, + "__listOfUsageRecord": { + "type": 
"list", + "member": { + "shape": "UsageRecord" + } + }, + "__listOfUsageStatisticsFilter": { + "type": "list", + "member": { + "shape": "UsageStatisticsFilter" + } + }, + "__listOfUsageTotal": { + "type": "list", + "member": { + "shape": "UsageTotal" + } + }, + "__listOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "__long": { + "type": "long" + }, + "__string": { + "type": "string" + }, + "__timestampIso8601": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "__timestampUnix": { + "type": "timestamp", + "timestampFormat": "unixTimestamp" + } + }, + "documentation": "

Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS. Macie automates the discovery of sensitive data, such as PII and intellectual property, to provide you with insight into the data that your organization stores in AWS. Macie also provides an inventory of your Amazon S3 buckets, which it continually monitors for you. If Macie detects sensitive data or potential data access issues, it generates detailed findings for you to review and act upon as necessary.

" +} \ No newline at end of file diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index ef7456633d78..085db7eac258 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 27aaf87feb2d..e170f9cb7690 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json index 1a01518c04ae..103515df6d4f 100644 --- a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json @@ -116,7 +116,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

This operation allows you to request changes in your entities.

" + "documentation":"

This operation allows you to request changes for your entities. Within a single ChangeSet, you cannot start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the ChangeSet has completed (either succeeded, cancelled, or failed). If you try to start a ChangeSet containing a change against an entity that is already locked, you will receive a ResourceInUseException.

For example, you cannot start the ChangeSet described in the example below because it contains two changes to execute the same change type (AddRevisions) against the same entity (entity-id@1).

" } }, "shapes":{ @@ -268,6 +268,10 @@ "shape":"Entity", "documentation":"

The entity to be changed.

" }, + "Details":{ + "shape":"Json", + "documentation":"

This object contains details specific to the change type of the requested change.

" + }, "ErrorDetailList":{ "shape":"ErrorDetailList", "documentation":"

An array of ErrorDetail objects associated with the change.

" @@ -417,7 +421,7 @@ "members":{ "Name":{ "shape":"StringValue", - "documentation":"

The name for the entity. This value is not unique. It is defined by the provider.

" + "documentation":"

The name for the entity. This value is not unique. It is defined by the seller.

" }, "EntityType":{ "shape":"EntityType", @@ -437,7 +441,7 @@ }, "Visibility":{ "shape":"StringValue", - "documentation":"

The visibility status of the entity to subscribers. This value can be Public (everyone can view the entity), Limited (the entity is visible to limited accounts only), or Restricted (the entity was published and then unpublished and only existing subscribers can view it).

" + "documentation":"

The visibility status of the entity to buyers. This value can be Public (everyone can view the entity), Limited (the entity is visible to limited accounts only), or Restricted (the entity was published and then unpublished and only existing buyers can view it).

" } }, "documentation":"

This object is a container for common summary information about the entity. The summary doesn't contain the whole entity structure, but it does contain information common across all entities.

" @@ -532,7 +536,7 @@ }, "Sort":{ "shape":"Sort", - "documentation":"

An object that contains two attributes, sortBy and sortOrder.

" + "documentation":"

An object that contains two attributes, SortBy and SortOrder.

" }, "MaxResults":{ "shape":"MaxResultInteger", @@ -578,7 +582,7 @@ }, "Sort":{ "shape":"Sort", - "documentation":"

An object that contains two attributes, sortBy and sortOrder.

" + "documentation":"

An object that contains two attributes, SortBy and SortOrder.

" }, "NextToken":{ "shape":"NextToken", @@ -683,7 +687,7 @@ "documentation":"

The sorting order. Can be ASCENDING or DESCENDING. The default value is DESCENDING.

" } }, - "documentation":"

An object that contains two attributes, sortBy and sortOrder.

" + "documentation":"

An object that contains two attributes, SortBy and SortOrder.

" }, "SortBy":{ "type":"string", @@ -764,5 +768,5 @@ "min":1 } }, - "documentation":"

Catalog API actions allow you to create, describe, list, and delete changes to your published entities. An entity is a product or an offer on AWS Marketplace.

You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

" + "documentation":"

Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

" } diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index b36818a906f4..9e7390121c99 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index f9a68dbb8a71..bac13e6b2efb 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 4c842a2a36c8..ac0e039f6bf3 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json b/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json index 4b72bca72d5b..abe33baead0f 100644 --- a/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacemetering/src/main/resources/codegen-resources/service-2.json @@ -278,7 +278,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

AWS Marketplace does not support metering usage from the underlying platform. Currently, only Amazon ECS is supported.

", + "documentation":"

AWS Marketplace does not support metering usage from the underlying platform. Currently, Amazon ECS, Amazon EKS, and AWS Fargate are supported.

", "exception":true }, "ProductCode":{ @@ -442,5 +442,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

Submitting Metering Records

  • MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is called from an EC2 instance or a container running on EKS or ECS.

  • BatchMeterUsage- Submits the metering record for a set of customers. BatchMeterUsage is called from a software-as-a-service (SaaS) application.

Accepting New Customers

  • ResolveCustomer- Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The Registration Token is resolved through this API to obtain a CustomerIdentifier and Product Code.

Entitlement and Metering for Paid Container Products

  • Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do so if you want to receive usage data in your seller reports. For more information on using the RegisterUsage operation, see Container-Based Products.

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .

" + "documentation":"AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the AWS Marketplace Seller Guide.

Submitting Metering Records

  • MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is called from an EC2 instance or a container running on EKS or ECS.

  • BatchMeterUsage- Submits the metering record for a set of customers. BatchMeterUsage is called from a software-as-a-service (SaaS) application.

Accepting New Customers

  • ResolveCustomer- Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The Registration Token is resolved through this API to obtain a CustomerIdentifier and Product Code.

Entitlement and Metering for Paid Container Products

  • Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do so if you want to receive usage data in your seller reports. For more information on using the RegisterUsage operation, see Container-Based Products.

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use CloudTrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide.

" } diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 7764ac4ace03..7be8942ff287 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json index 45802b6d623e..910ab221f94d 100644 --- a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json @@ -1068,6 +1068,11 @@ "shape": "__string", "locationName": "streamId", "documentation": "The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams." + }, + "VpcInterfaceAttachment": { + "shape": "VpcInterfaceAttachment", + "locationName": "vpcInterfaceAttachment", + "documentation": "The name of the VPC interface attachment to use for this output." } }, "documentation": "The output that you want to add to this flow.", @@ -1790,6 +1795,11 @@ "shape": "Transport", "locationName": "transport", "documentation": "Attributes related to the transport stream that are used in the output." + }, + "VpcInterfaceAttachment": { + "shape": "VpcInterfaceAttachment", + "locationName": "vpcInterfaceAttachment", + "documentation": "The name of the VPC interface attachment to use for this output." } }, "documentation": "The settings for an output.", @@ -2484,6 +2494,11 @@ "shape": "__string", "locationName": "streamId", "documentation": "The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams." + }, + "VpcInterfaceAttachment": { + "shape": "VpcInterfaceAttachment", + "locationName": "vpcInterfaceAttachment", + "documentation": "The name of the VPC interface attachment to use for this output." 
} }, "documentation": "The fields that you want to update in the output.", @@ -2659,6 +2674,17 @@ "Name" ] }, + "VpcInterfaceAttachment": { + "type": "structure", + "members": { + "VpcInterfaceName": { + "shape": "__string", + "locationName": "vpcInterfaceName", + "documentation": "The name of the VPC interface to use for this output." + } + }, + "documentation": "The settings for attaching a VPC interface to an output." + }, "VpcInterfaceRequest": { "type": "structure", "members": { diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 150271fc81f5..d4b4105f8fb9 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index a131164bb62f..b20a4f070435 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1264,7 +1264,7 @@ "documentation": "Specify the conditions when the service will run your job with accelerated transcoding." } }, - "documentation": "Accelerated transcoding can significantly speed up jobs with long, visually complex content. Outputs that use this feature incur pro-tier pricing. For information about feature limitations, see the AWS Elemental MediaConvert User Guide.", + "documentation": "Accelerated transcoding can significantly speed up jobs with long, visually complex content.", "required": [ "Mode" ] @@ -1392,6 +1392,8 @@ "AC3", "EAC3", "EAC3_ATMOS", + "VORBIS", + "OPUS", "PASSTHROUGH" ] }, @@ -1438,13 +1440,23 @@ "locationName": "mp3Settings", "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." 
}, + "OpusSettings": { + "shape": "OpusSettings", + "locationName": "opusSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS." + }, + "VorbisSettings": { + "shape": "VorbisSettings", + "locationName": "vorbisSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis." + }, "WavSettings": { "shape": "WavSettings", "locationName": "wavSettings", "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." } }, - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * VORBIS, VorbisSettings * OPUS, OpusSettings" }, "AudioDefaultSelection": { "type": "string", @@ -1480,7 +1492,7 @@ "CodecSettings": { "shape": "AudioCodecSettings", "locationName": "codecSettings", - "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings" + "documentation": "Audio codec settings (CodecSettings) under (AudioDescriptions) contains the group of settings related to audio encoding. The settings in this group vary depending on the value that you choose for Audio codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AAC, AacSettings * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * VORBIS, VorbisSettings * OPUS, OpusSettings" }, "CustomLanguageCode": { "shape": "__stringPatternAZaZ23AZaZ", @@ -1602,7 +1614,7 @@ "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio." 
}, "ExternalAudioFileInput": { - "shape": "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", + "shape": "__stringPatternS3WWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput", "documentation": "Specifies audio data from an external file source." }, @@ -1694,7 +1706,7 @@ }, "Av1FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. 
The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -1739,7 +1751,7 @@ "FramerateConversionAlgorithm": { "shape": "Av1FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -1824,7 +1836,7 @@ }, "BillingTagsSource": { "type": "string", - "documentation": "Optional. Choose a tag type that AWS Billing and Cost Management will use to sort your AWS Elemental MediaConvert costs on any billing report that you set up. Any transcoding outputs that don't have an associated tag will appear in your billing report unsorted. If you don't choose a valid value for this field, your job outputs will appear on the billing report unsorted.", + "documentation": "The tag type that AWS Billing and Cost Management will use to sort your AWS Elemental MediaConvert costs on any billing report that you set up.", "enum": [ "QUEUE", "PRESET", @@ -2142,6 +2154,22 @@ }, "documentation": "Set up captions in your outputs by first selecting them from your input here." }, + "CaptionSourceFramerate": { + "type": "structure", + "members": { + "FramerateDenominator": { + "shape": "__integerMin1Max1001", + "locationName": "framerateDenominator", + "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator)." 
+ }, + "FramerateNumerator": { + "shape": "__integerMin1Max60000", + "locationName": "framerateNumerator", + "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate denominator (framerateDenominator)." + } + }, + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + }, "CaptionSourceSettings": { "type": "structure", "members": { @@ -2638,6 +2666,11 @@ "shape": "MpdSettings", "locationName": "mpdSettings", "documentation": "Settings for MP4 segments in DASH" + }, + "MxfSettings": { + "shape": "MxfSettings", + "locationName": "mxfSettings", + "documentation": "MXF settings" } }, "documentation": "Container specific settings." @@ -2655,6 +2688,7 @@ "MP4", "MPD", "MXF", + "WEBM", "RAW" ] }, @@ -2664,7 +2698,7 @@ "AccelerationSettings": { "shape": "AccelerationSettings", "locationName": "accelerationSettings", - "documentation": "Accelerated transcoding can significantly speed up jobs with long, visually complex content. Outputs that use this feature incur pro-tier pricing. For information about feature limitations, see the AWS Elemental MediaConvert User Guide." + "documentation": "Optional. Accelerated transcoding can significantly speed up jobs with long, visually complex content. Outputs that use this feature incur pro-tier pricing. For information about feature limitations, see the AWS Elemental MediaConvert User Guide." 
}, "BillingTagsSource": { "shape": "BillingTagsSource", @@ -2674,18 +2708,23 @@ "ClientRequestToken": { "shape": "__string", "locationName": "clientRequestToken", - "documentation": "Idempotency token for CreateJob operation.", + "documentation": "Optional. Idempotency token for CreateJob operation.", "idempotencyToken": true }, + "HopDestinations": { + "shape": "__listOfHopDestination", + "locationName": "hopDestinations", + "documentation": "Optional. Use queue hopping to avoid overly long waits in the backlog of the queue that you submit your job to. Specify an alternate queue and the maximum time that your job will wait in the initial queue before hopping. For more information about this feature, see the AWS Elemental MediaConvert User Guide." + }, "JobTemplate": { "shape": "__string", "locationName": "jobTemplate", - "documentation": "When you create a job, you can either specify a job template or specify the transcoding settings individually" + "documentation": "Optional. When you create a job, you can either specify a job template or specify the transcoding settings individually." }, "Priority": { "shape": "__integerMinNegative50Max50", "locationName": "priority", - "documentation": "Specify the relative priority for this job. In any given queue, the service begins processing the job with the highest value first. When more than one job has the same priority, the service begins processing the job that you submitted first. If you don't specify a priority, the service uses the default value 0." + "documentation": "Optional. Specify the relative priority for this job. In any given queue, the service begins processing the job with the highest value first. When more than one job has the same priority, the service begins processing the job that you submitted first. If you don't specify a priority, the service uses the default value 0." 
}, "Queue": { "shape": "__string", @@ -2705,22 +2744,22 @@ "SimulateReservedQueue": { "shape": "SimulateReservedQueue", "locationName": "simulateReservedQueue", - "documentation": "Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default." + "documentation": "Optional. Enable this setting when you run a test job to estimate how many reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert runs your job from an on-demand queue with similar performance to what you will see with one RTS in a reserved queue. This setting is disabled by default." }, "StatusUpdateInterval": { "shape": "StatusUpdateInterval", "locationName": "statusUpdateInterval", - "documentation": "Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." + "documentation": "Optional. Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch Events. Set the interval, in seconds, between status updates. MediaConvert sends an update at this interval from the time the service begins processing your job to the time it completes the transcode or encounters an error." }, "Tags": { "shape": "__mapOf__string", "locationName": "tags", - "documentation": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." + "documentation": "Optional. The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key." 
}, "UserMetadata": { "shape": "__mapOf__string", "locationName": "userMetadata", - "documentation": "User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs." + "documentation": "Optional. User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs." } }, "required": [ @@ -2756,6 +2795,11 @@ "locationName": "description", "documentation": "Optional. A description of the job template you are creating." }, + "HopDestinations": { + "shape": "__listOfHopDestination", + "locationName": "hopDestinations", + "documentation": "Optional. Use queue hopping to avoid overly long waits in the backlog of the queue that you submit your job to. Specify an alternate queue and the maximum time that your job will wait in the initial queue before hopping. For more information about this feature, see the AWS Elemental MediaConvert User Guide." + }, "Name": { "shape": "__string", "locationName": "name", @@ -4080,6 +4124,11 @@ "locationName": "convert608To708", "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, + "Framerate": { + "shape": "CaptionSourceFramerate", + "locationName": "framerate", + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction, using the settings Framerate numerator (framerateNumerator) and Framerate denominator (framerateDenominator). For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." 
+ }, "SourceFile": { "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", "locationName": "sourceFile", @@ -4327,7 +4376,7 @@ }, "H264FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -4362,7 +4411,7 @@ }, "H264ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4370,7 +4419,7 @@ }, "H264QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -4475,7 +4524,7 @@ "FramerateConversionAlgorithm": { "shape": "H264FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -4545,22 +4594,22 @@ "ParControl": { "shape": "H264ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Pixel Aspect Ratio denominator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." 
}, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Pixel Aspect Ratio numerator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H264QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding." + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "QvbrSettings": { "shape": "H264QvbrSettings", @@ -4746,7 +4795,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4754,7 +4803,7 @@ }, "H265FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -4789,7 +4838,7 @@ }, "H265ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. 
To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -4797,7 +4846,7 @@ }, "H265QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -4893,12 +4942,12 @@ "FramerateControl": { "shape": "H265FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." }, "FramerateConversionAlgorithm": { "shape": "H265FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -4968,22 +5017,22 @@ "ParControl": { "shape": "H265ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Pixel Aspect Ratio denominator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Pixel Aspect Ratio numerator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H265QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to use fast single-pass, high-quality singlepass, or high-quality multipass video encoding." + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." 
}, "QvbrSettings": { "shape": "H265QvbrSettings", @@ -5589,6 +5638,27 @@ "TDRL" ] }, + "HopDestination": { + "type": "structure", + "members": { + "Priority": { + "shape": "__integerMinNegative50Max50", + "locationName": "priority", + "documentation": "Optional. When you set up a job to use queue hopping, you can specify a different relative priority for the job in the destination queue. If you don't specify, the relative priority will remain the same as in the previous queue." + }, + "Queue": { + "shape": "__string", + "locationName": "queue", + "documentation": "Optional unless the job is submitted on the default queue. When you set up a job to use queue hopping, you can specify a destination queue. This queue cannot be the original queue to which the job is submitted. If the original queue isn't the default queue and you don't specify the destination queue, the job will move to the default queue." + }, + "WaitMinutes": { + "shape": "__integer", + "locationName": "waitMinutes", + "documentation": "Required for setting up a job to use queue hopping. Minimum wait time in minutes until the job can hop to the destination queue. Valid range is 1 to 1440 minutes, inclusive." + } + }, + "documentation": "Optional. Configuration for a destination queue to which the job can hop once a customer-defined minimum wait time has passed." + }, "Id3Insertion": { "type": "structure", "members": { @@ -5622,14 +5692,14 @@ "StylePassthrough": { "shape": "ImscStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions." + "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. 
This option is available only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions." } }, "documentation": "Settings specific to IMSC caption outputs." }, "ImscStylePassthrough": { "type": "string", - "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions.", + "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions.", "enum": [ "ENABLED", "DISABLED" @@ -5646,12 +5716,12 @@ "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." + "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." + "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 20 captions selectors per input." 
}, "Crop": { "shape": "Rectangle", @@ -5661,7 +5731,7 @@ "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DecryptionSettings": { "shape": "InputDecryptionSettings", @@ -5754,7 +5824,7 @@ }, "InputDeblockFilter": { "type": "string", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs.", + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", "enum": [ "ENABLED", "DISABLED" @@ -5833,12 +5903,12 @@ "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use mutiple Audio selectors per input." + "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", "locationName": "captionSelectors", - "documentation": "Use Captions selectors (CaptionSelectors) to specify the captions data from the input that you will use in your outputs. You can use mutiple captions selectors per input." + "documentation": "Use captions selectors to specify the captions data from your input that you use in your outputs. 
You can use up to 20 captions selectors per input." }, "Crop": { "shape": "Rectangle", @@ -5848,7 +5918,7 @@ "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manaully controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DenoiseFilter": { "shape": "InputDenoiseFilter", @@ -6013,7 +6083,7 @@ "BillingTagsSource": { "shape": "BillingTagsSource", "locationName": "billingTagsSource", - "documentation": "Optional. Choose a tag type that AWS Billing and Cost Management will use to sort your AWS Elemental MediaConvert costs on any billing report that you set up. Any transcoding outputs that don't have an associated tag will appear in your billing report unsorted. If you don't choose a valid value for this field, your job outputs will appear on the billing report unsorted." + "documentation": "The tag type that AWS Billing and Cost Management will use to sort your AWS Elemental MediaConvert costs on any billing report that you set up." }, "CreatedAt": { "shape": "__timestampUnix", @@ -6035,6 +6105,11 @@ "locationName": "errorMessage", "documentation": "Error message of Job" }, + "HopDestinations": { + "shape": "__listOfHopDestination", + "locationName": "hopDestinations", + "documentation": "Optional list of hop destinations." + }, "Id": { "shape": "__string", "locationName": "id", @@ -6068,7 +6143,12 @@ "Queue": { "shape": "__string", "locationName": "queue", - "documentation": "Optional. When you create a job, you can specify a queue to send it to. If you don't specify, the job will go to the default queue. 
For more about queues, see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + "documentation": "When you create a job, you can specify a queue to send it to. If you don't specify, the job will go to the default queue. For more about queues, see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html" + }, + "QueueTransitions": { + "shape": "__listOfQueueTransition", + "locationName": "queueTransitions", + "documentation": "The job's queue hopping history." }, "RetryCount": { "shape": "__integer", @@ -6232,6 +6312,11 @@ "locationName": "description", "documentation": "An optional description you create for each job template." }, + "HopDestinations": { + "shape": "__listOfHopDestination", + "locationName": "hopDestinations", + "documentation": "Optional list of hop destinations." + }, "LastUpdated": { "shape": "__timestampUnix", "locationName": "lastUpdated", @@ -6561,7 +6646,7 @@ "Order": { "shape": "Order", "locationName": "order", - "documentation": "When you request lists of resources, you can optionally specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", "location": "querystring" } } @@ -6593,25 +6678,25 @@ "NextToken": { "shape": "__string", "locationName": "nextToken", - "documentation": "Use this string, provided with the response to a previous request, to request the next batch of jobs.", + "documentation": "Optional. Use this string, provided with the response to a previous request, to request the next batch of jobs.", "location": "querystring" }, "Order": { "shape": "Order", "locationName": "order", - "documentation": "When you request lists of resources, you can optionally specify whether they are sorted in ASCENDING or DESCENDING order. 
Default varies by resource.", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", "location": "querystring" }, "Queue": { "shape": "__string", "locationName": "queue", - "documentation": "Provide a queue name to get back only jobs from that queue.", + "documentation": "Optional. Provide a queue name to get back only jobs from that queue.", "location": "querystring" }, "Status": { "shape": "JobStatus", "locationName": "status", - "documentation": "A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.", + "documentation": "Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.", "location": "querystring" } } @@ -6661,7 +6746,7 @@ "Order": { "shape": "Order", "locationName": "order", - "documentation": "When you request lists of resources, you can optionally specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", "location": "querystring" } } @@ -6705,7 +6790,7 @@ "Order": { "shape": "Order", "locationName": "order", - "documentation": "When you request lists of resources, you can optionally specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", "location": "querystring" } } @@ -7510,7 +7595,7 @@ }, "Mpeg2FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -7518,7 +7603,7 @@ }, "Mpeg2FramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. 
The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -7556,7 +7641,7 @@ }, "Mpeg2ParControl": { "type": "string", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. Using the console, do this by choosing Follow source for Pixel aspect ratio.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -7564,7 +7649,7 @@ }, "Mpeg2QualityTuningLevel": { "type": "string", - "documentation": "Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to use single-pass or multipass video encoding.", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "MULTI_PASS" @@ -7617,12 +7702,12 @@ "FramerateControl": { "shape": "Mpeg2FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. 
If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." }, "FramerateConversionAlgorithm": { "shape": "Mpeg2FramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max1001", @@ -7687,22 +7772,22 @@ "ParControl": { "shape": "Mpeg2ParControl", "locationName": "parControl", - "documentation": "Using the API, enable ParFollowSource if you want the service to use the pixel aspect ratio from the input. 
Using the console, do this by choosing Follow source for Pixel aspect ratio." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Pixel Aspect Ratio denominator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Pixel Aspect Ratio numerator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "Mpeg2QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to use single-pass or multipass video encoding." 
+ "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "RateControlMode": { "shape": "Mpeg2RateControlMode", @@ -7872,6 +7957,51 @@ "UTF16" ] }, + "MxfAfdSignaling": { + "type": "string", + "documentation": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings.", + "enum": [ + "NO_COPY", + "COPY_FROM_VIDEO" + ] + }, + "MxfSettings": { + "type": "structure", + "members": { + "AfdSignaling": { + "shape": "MxfAfdSignaling", + "locationName": "afdSignaling", + "documentation": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings." 
+ } + }, + "documentation": "MXF settings" + }, + "NexGuardFileMarkerSettings": { + "type": "structure", + "members": { + "License": { + "shape": "__stringMin1Max100000", + "locationName": "license", + "documentation": "Use the base64 license string that Nagra provides you. Enter it directly in your JSON job specification or in the console. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + }, + "Payload": { + "shape": "__integerMin0Max4194303", + "locationName": "payload", + "documentation": "Specify the payload ID that you want associated with this output. Valid values vary depending on your Nagra NexGuard forensic watermarking workflow. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You must generate a unique ID for each asset you watermark, and keep a record of which ID you have assigned to each asset. Neither Nagra nor MediaConvert keep track of the relationship between output files and your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up two output groups. For one output group, set the value of Payload ID (payload) to 0 in every output. For the other output group, set Payload ID (payload) to 1 in every output." + }, + "Preset": { + "shape": "__stringMin1Max256", + "locationName": "preset", + "documentation": "Enter one of the watermarking preset strings that Nagra provides you. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + }, + "Strength": { + "shape": "WatermarkingStrength", + "locationName": "strength", + "documentation": "Optional. Ignore this setting unless Nagra support directs you to specify a value. When you don't specify a value here, the Nagra NexGuard library uses its default value." 
+ } + }, + "documentation": "For forensic video watermarking, MediaConvert supports Nagra NexGuard File Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) and OTT Streaming workflows." + }, "NielsenConfiguration": { "type": "structure", "members": { @@ -7888,6 +8018,15 @@ }, "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." }, + "NoiseFilterPostTemporalSharpening": { + "type": "string", + "documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can optionally use this setting to apply additional sharpening. The default behavior, Auto (AUTO) allows the transcoder to determine whether to apply filtering, depending on input type and quality.", + "enum": [ + "DISABLED", + "ENABLED", + "AUTO" + ] + }, "NoiseReducer": { "type": "structure", "members": { @@ -7968,6 +8107,11 @@ "locationName": "aggressiveMode", "documentation": "Use Aggressive mode for content that has complex motion. Higher values produce stronger temporal filtering. This filters highly complex scenes more aggressively and creates better VQ for low bitrate outputs." }, + "PostTemporalSharpening": { + "shape": "NoiseFilterPostTemporalSharpening", + "locationName": "postTemporalSharpening", + "documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can optionally use this setting to apply additional sharpening. The default behavior, Auto (AUTO) allows the transcoder to determine whether to apply filtering, depending on input type and quality." 
+ }, "Speed": { "shape": "__integerMinNegative1Max3", "locationName": "speed", @@ -7995,9 +8139,30 @@ }, "documentation": "The resource you requested doesn't exist." }, + "OpusSettings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin32000Max192000", + "locationName": "bitrate", + "documentation": "Optional. Specify the average bitrate in bits per second. Valid values are multiples of 8000, from 32000 through 192000. The default value is 96000, which we recommend for quality and bandwidth." + }, + "Channels": { + "shape": "__integerMin1Max2", + "locationName": "channels", + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + }, + "SampleRate": { + "shape": "__integerMin16000Max48000", + "locationName": "sampleRate", + "documentation": "Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The default value is 48000." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS." + }, "Order": { "type": "string", - "documentation": "When you request lists of resources, you can optionally specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", "enum": [ "ASCENDING", "DESCENDING" @@ -8024,7 +8189,7 @@ "Extension": { "shape": "__string", "locationName": "extension", - "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. 
If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" + "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" }, "NameModifier": { "shape": "__stringMin1", @@ -8181,6 +8346,17 @@ }, "documentation": "Specific settings for this type of output." }, + "PartnerWatermarking": { + "type": "structure", + "members": { + "NexguardFileMarkerSettings": { + "shape": "NexGuardFileMarkerSettings", + "locationName": "nexguardFileMarkerSettings", + "documentation": "For forensic video watermarking, MediaConvert supports Nagra NexGuard File Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) and OTT Streaming workflows." + } + }, + "documentation": "If you work with a third party video watermarking partner, use the group of settings that correspond with your watermarking partner to include watermarks in your output." + }, "Preset": { "type": "structure", "members": { @@ -8286,7 +8462,7 @@ }, "ProresFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. 
If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -8294,7 +8470,7 @@ }, "ProresFramerateConversionAlgorithm": { "type": "string", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion.", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion.", "enum": [ "DUPLICATE_DROP", "INTERPOLATE" @@ -8313,7 +8489,7 @@ }, "ProresParControl": { "type": "string", - "documentation": "Use (ProresParControl) to specify how the service determines the pixel aspect ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect ratio from the input. 
To specify a different pixel aspect ratio: Using the console, choose it from the dropdown menu. Using the API, set ProresParControl to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator).", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -8330,12 +8506,12 @@ "FramerateControl": { "shape": "ProresFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job sepecification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." }, "FramerateConversionAlgorithm": { "shape": "ProresFramerateConversionAlgorithm", "locationName": "framerateConversionAlgorithm", - "documentation": "When set to INTERPOLATE, produces smoother motion during frame rate conversion." + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use duplicate drop conversion." }, "FramerateDenominator": { "shape": "__integerMin1Max2147483647", @@ -8355,17 +8531,17 @@ "ParControl": { "shape": "ProresParControl", "locationName": "parControl", - "documentation": "Use (ProresParControl) to specify how the service determines the pixel aspect ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect ratio from the input. To specify a different pixel aspect ratio: Using the console, choose it from the dropdown menu. Using the API, set ProresParControl to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator)." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. 
To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Pixel Aspect Ratio denominator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Pixel Aspect Ratio numerator." + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "SlowPal": { "shape": "ProresSlowPal", @@ -8476,6 +8652,27 @@ "PAUSED" ] }, + "QueueTransition": { + "type": "structure", + "members": { + "DestinationQueue": { + "shape": "__string", + "locationName": "destinationQueue", + "documentation": "The queue that the job was on after the transition." + }, + "SourceQueue": { + "shape": "__string", + "locationName": "sourceQueue", + "documentation": "The queue that the job was on before the transition." + }, + "Timestamp": { + "shape": "__timestampUnix", + "locationName": "timestamp", + "documentation": "The time, in Unix epoch format, that the job moved from the source queue to the destination queue." 
+ } + }, + "documentation": "Description of the source and destination queues between which the job has moved, along with the timestamp of the move" + }, "Rectangle": { "type": "structure", "members": { @@ -9033,14 +9230,14 @@ "StylePassthrough": { "shape": "TtmlStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Pass through style and position information from a TTML-like input source (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output." + "documentation": "Pass through style and position information from a TTML-like input source (TTML, SMPTE-TT) to the TTML output." } }, "documentation": "Settings specific to TTML caption outputs, including Pass style information (TtmlStylePassthrough)." }, "TtmlStylePassthrough": { "type": "string", - "documentation": "Pass through style and position information from a TTML-like input source (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output.", + "documentation": "Pass through style and position information from a TTML-like input source (TTML, SMPTE-TT) to the TTML output.", "enum": [ "ENABLED", "DISABLED" @@ -9095,6 +9292,11 @@ "locationName": "description", "documentation": "The new description for the job template, if you are changing it." }, + "HopDestinations": { + "shape": "__listOfHopDestination", + "locationName": "hopDestinations", + "documentation": "Optional list of hop destinations." + }, "Name": { "shape": "__string", "locationName": "name", @@ -9223,7 +9425,9 @@ "H_264", "H_265", "MPEG2", - "PRORES" + "PRORES", + "VP8", + "VP9" ] }, "VideoCodecSettings": { @@ -9263,9 +9467,19 @@ "shape": "ProresSettings", "locationName": "proresSettings", "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." + }, + "Vp8Settings": { + "shape": "Vp8Settings", + "locationName": "vp8Settings", + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." 
+ }, + "Vp9Settings": { + "shape": "Vp9Settings", + "locationName": "vp9Settings", + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9, Vp9Settings" }, "VideoDescription": { "type": "structure", @@ -9283,7 +9497,7 @@ "CodecSettings": { "shape": "VideoCodecSettings", "locationName": "codecSettings", - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings" + "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, FrameCaptureSettings * AV1, Av1Settings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VP8, Vp8Settings * VP9, Vp9Settings" }, "ColorMetadata": { "shape": "ColorMetadata", @@ -9392,6 +9606,11 @@ "locationName": "noiseReducer", "documentation": "Enable the Noise reducer (NoiseReducer) feature to remove noise from your video output if necessary. Enable or disable this feature for each output individually. This setting is disabled by default." }, + "PartnerWatermarking": { + "shape": "PartnerWatermarking", + "locationName": "partnerWatermarking", + "documentation": "If you work with a third party video watermarking partner, use the group of settings that correspond with your watermarking partner to include watermarks in your output." + }, "TimecodeBurnin": { "shape": "TimecodeBurnin", "locationName": "timecodeBurnin", @@ -9449,6 +9668,258 @@ "PIC_TIMING_SEI" ] }, + "VorbisSettings": { + "type": "structure", + "members": { + "Channels": { + "shape": "__integerMin1Max2", + "locationName": "channels", + "documentation": "Optional. Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2. The default value is 2." + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate", + "documentation": "Optional. 
Specify the audio sample rate in Hz. Valid values are 22050, 32000, 44100, and 48000. The default value is 48000." + }, + "VbrQuality": { + "shape": "__integerMinNegative1Max10", + "locationName": "vbrQuality", + "documentation": "Optional. Specify the variable audio quality of this Vorbis output from -1 (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s, respectively." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis." + }, + "Vp8FramerateControl": { + "type": "string", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp8FramerateConversionAlgorithm": { + "type": "string", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. 
When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.", + "enum": [ + "DUPLICATE_DROP", + "INTERPOLATE" + ] + }, + "Vp8ParControl": { + "type": "string", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp8QualityTuningLevel": { + "type": "string", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "enum": [ + "MULTI_PASS", + "MULTI_PASS_HQ" + ] + }, + "Vp8RateControlMode": { + "type": "string", + "documentation": "With the VP8 codec, you can use only the variable bitrate (VBR) rate control mode.", + "enum": [ + "VBR" + ] + }, + "Vp8Settings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "bitrate", + "documentation": "Target bitrate in bits/second. For example, enter five megabits per second as 5000000." + }, + "FramerateControl": { + "shape": "Vp8FramerateControl", + "locationName": "framerateControl", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + }, + "FramerateConversionAlgorithm": { + "shape": "Vp8FramerateConversionAlgorithm", + "locationName": "framerateConversionAlgorithm", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateDenominator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "FramerateNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateNumerator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. 
When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "GopSize": { + "shape": "__doubleMin0", + "locationName": "gopSize", + "documentation": "GOP Length (keyframe interval) in frames. Must be greater than zero." + }, + "HrdBufferSize": { + "shape": "__integerMin0Max47185920", + "locationName": "hrdBufferSize", + "documentation": "Optional. Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." + }, + "MaxBitrate": { + "shape": "__integerMin1000Max1152000000", + "locationName": "maxBitrate", + "documentation": "Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. Specify the maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. The default behavior uses twice the target bitrate as the maximum bitrate." + }, + "ParControl": { + "shape": "Vp8ParControl", + "locationName": "parControl", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + }, + "ParDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parDenominator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. 
In this example, the value for parDenominator is 33." + }, + "ParNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parNumerator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + }, + "QualityTuningLevel": { + "shape": "Vp8QualityTuningLevel", + "locationName": "qualityTuningLevel", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + }, + "RateControlMode": { + "shape": "Vp8RateControlMode", + "locationName": "rateControlMode", + "documentation": "With the VP8 codec, you can use only the variable bitrate (VBR) rate control mode." + } + }, + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + }, + "Vp9FramerateControl": { + "type": "string", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. 
Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp9FramerateConversionAlgorithm": { + "type": "string", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion.", + "enum": [ + "DUPLICATE_DROP", + "INTERPOLATE" + ] + }, + "Vp9ParControl": { + "type": "string", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "enum": [ + "INITIALIZE_FROM_SOURCE", + "SPECIFIED" + ] + }, + "Vp9QualityTuningLevel": { + "type": "string", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "enum": [ + "MULTI_PASS", + "MULTI_PASS_HQ" + ] + }, + "Vp9RateControlMode": { + "type": "string", + "documentation": "With the VP9 codec, you can use only the variable bitrate (VBR) rate control mode.", + "enum": [ + "VBR" + ] + }, + "Vp9Settings": { + "type": "structure", + "members": { + "Bitrate": { + "shape": "__integerMin1000Max480000000", + "locationName": "bitrate", + "documentation": "Target bitrate in bits/second. 
For example, enter five megabits per second as 5000000." + }, + "FramerateControl": { + "shape": "Vp9FramerateControl", + "locationName": "framerateControl", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + }, + "FramerateConversionAlgorithm": { + "shape": "Vp9FramerateConversionAlgorithm", + "locationName": "framerateConversionAlgorithm", + "documentation": "Optional. Specify how the transcoder performs framerate conversion. The default behavior is to use Drop duplicate (DUPLICATE_DROP) conversion. When you choose Interpolate (INTERPOLATE) instead, the conversion produces smoother motion." + }, + "FramerateDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateDenominator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." 
+ }, + "FramerateNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "framerateNumerator", + "documentation": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateNumerator to specify the numerator of this fraction. In this example, use 24000 for the value of FramerateNumerator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976." + }, + "GopSize": { + "shape": "__doubleMin0", + "locationName": "gopSize", + "documentation": "GOP Length (keyframe interval) in frames. Must be greater than zero." + }, + "HrdBufferSize": { + "shape": "__integerMin0Max47185920", + "locationName": "hrdBufferSize", + "documentation": "Size of buffer (HRD buffer model) in bits. For example, enter five megabits as 5000000." + }, + "MaxBitrate": { + "shape": "__integerMin1000Max480000000", + "locationName": "maxBitrate", + "documentation": "Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. Specify the maximum bitrate in bits/second. For example, enter five megabits per second as 5000000. The default behavior uses twice the target bitrate as the maximum bitrate." + }, + "ParControl": { + "shape": "Vp9ParControl", + "locationName": "parControl", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio for this output. The default behavior is to use the same pixel aspect ratio as your input video." + }, + "ParDenominator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parDenominator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. 
For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + }, + "ParNumerator": { + "shape": "__integerMin1Max2147483647", + "locationName": "parNumerator", + "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + }, + "QualityTuningLevel": { + "shape": "Vp9QualityTuningLevel", + "locationName": "qualityTuningLevel", + "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + }, + "RateControlMode": { + "shape": "Vp9RateControlMode", + "locationName": "rateControlMode", + "documentation": "With the VP9 codec, you can use only the variable bitrate (VBR) rate control mode." + } + }, + "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." + }, + "WatermarkingStrength": { + "type": "string", + "documentation": "Optional. Ignore this setting unless Nagra support directs you to specify a value. When you don't specify a value here, the Nagra NexGuard library uses its default value.", + "enum": [ + "LIGHTEST", + "LIGHTER", + "DEFAULT", + "STRONGER", + "STRONGEST" + ] + }, "WavFormat": { "type": "string", "documentation": "The service defaults to using RIFF for WAV outputs. 
If your output audio is likely to exceed 4 GB in file size, or if you otherwise need the extended support of the RF64 format, set your output WAV file format to RF64.", @@ -9603,6 +10074,11 @@ "min": 0, "max": 4 }, + "__integerMin0Max4194303": { + "type": "integer", + "min": 0, + "max": 4194303 + }, "__integerMin0Max47185920": { "type": "integer", "min": 0, @@ -9673,6 +10149,11 @@ "min": 1000, "max": 300000000 }, + "__integerMin1000Max480000000": { + "type": "integer", + "min": 1000, + "max": 480000000 + }, "__integerMin10Max48": { "type": "integer", "min": 10, @@ -9683,6 +10164,11 @@ "min": 16000, "max": 320000 }, + "__integerMin16000Max48000": { + "type": "integer", + "min": 16000, + "max": 48000 + }, "__integerMin16Max24": { "type": "integer", "min": 16, @@ -9758,6 +10244,11 @@ "min": 1, "max": 6 }, + "__integerMin1Max60000": { + "type": "integer", + "min": 1, + "max": 60000 + }, "__integerMin1Max64": { "type": "integer", "min": 1, @@ -9788,6 +10279,11 @@ "min": 2, "max": 2147483647 }, + "__integerMin32000Max192000": { + "type": "integer", + "min": 32000, + "max": 192000 + }, "__integerMin32000Max384000": { "type": "integer", "min": 32000, @@ -9858,6 +10354,11 @@ "min": -180, "max": 180 }, + "__integerMinNegative1Max10": { + "type": "integer", + "min": -1, + "max": 10 + }, "__integerMinNegative1Max3": { "type": "integer", "min": -1, @@ -9947,6 +10448,12 @@ "shape": "HlsCaptionLanguageMapping" } }, + "__listOfHopDestination": { + "type": "list", + "member": { + "shape": "HopDestination" + } + }, "__listOfId3Insertion": { "type": "list", "member": { @@ -10037,6 +10544,12 @@ "shape": "Queue" } }, + "__listOfQueueTransition": { + "type": "list", + "member": { + "shape": "QueueTransition" + } + }, "__listOfTeletextPageType": { "type": "list", "member": { @@ -10171,6 +10684,11 @@ "max": 24, "pattern": "^[A-Za-z0-9+\\/]{22}==$|^[A-Za-z0-9+\\/]{16}$" }, + "__stringMin1Max100000": { + "type": "string", + "min": 1, + "max": 100000 + }, "__stringMin1Max256": { "type": 
"string", "min": 1, @@ -10272,14 +10790,14 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { - "type": "string", - "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][f
F]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" - }, "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLLHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { "type": "string", "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" }, + 
"__stringPatternS3WWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "type": "string", + "pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$
" + }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "type": "string", "pattern": "^\\s*<(.|\\n)*ManifestConfirmConditionNotification(.|\\n)*>\\s*$" @@ -10306,4 +10824,4 @@ } }, "documentation": "AWS Elemental MediaConvert" -} \ No newline at end of file +} diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 7e62d4aed2f7..7ba6164c6ed6 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/paginators-1.json b/services/medialive/src/main/resources/codegen-resources/paginators-1.json index ca39535729b3..dafc70dc02c0 100644 --- a/services/medialive/src/main/resources/codegen-resources/paginators-1.json +++ b/services/medialive/src/main/resources/codegen-resources/paginators-1.json @@ -47,6 +47,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Multiplexes" + }, + "ListInputDevices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InputDevices" } } } diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index e55be0908b40..1d463ce4b8f6 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -9,7 +9,7 @@ "uid": "medialive-2017-10-14", "signatureVersion": "v4", "serviceAbbreviation": "MediaLive", - "jsonVersion": "1.1" + "jsonVersion": "1.1" }, "operations": { "BatchUpdateSchedule": { @@ -555,7 +555,7 @@ }, { "shape": "NotFoundException", - "documentation": "The program that you are trying to delete doesn\u2019t exist. Check the ID and try again." + "documentation": "The program that you are trying to delete doesn’t exist. Check the ID and try again." 
}, { "shape": "GatewayTimeoutException", @@ -790,6 +790,52 @@ ], "documentation": "Produces details about an input" }, + "DescribeInputDevice": { + "name": "DescribeInputDevice", + "http": { + "method": "GET", + "requestUri": "/prod/inputDevices/{inputDeviceId}", + "responseCode": 200 + }, + "input": { + "shape": "DescribeInputDeviceRequest" + }, + "output": { + "shape": "DescribeInputDeviceResponse", + "documentation": "Details for the input device." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to describe the input device." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "The input device you're requesting to describe does not exist. Check the ID." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on describe calls to the input device service." + } + ], + "documentation": "Gets the details for the input device" + }, "DescribeInputSecurityGroup": { "name": "DescribeInputSecurityGroup", "http": { @@ -869,7 +915,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to describe doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to describe doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -915,7 +961,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't describe the program. The multiplex or the program that you specified doesn\u2019t exist. Check the IDs and try again." + "documentation": "MediaLive can't describe the program. 
The multiplex or the program that you specified doesn’t exist. Check the IDs and try again." }, { "shape": "GatewayTimeoutException", @@ -1108,6 +1154,48 @@ ], "documentation": "Produces list of channels that have been created" }, + "ListInputDevices": { + "name": "ListInputDevices", + "http": { + "method": "GET", + "requestUri": "/prod/inputDevices", + "responseCode": 200 + }, + "input": { + "shape": "ListInputDevicesRequest" + }, + "output": { + "shape": "ListInputDevicesResponse", + "documentation": "An array of input devices." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to list input devices." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on list devices calls to the input device service." + } + ], + "documentation": "List input devices" + }, "ListInputSecurityGroups": { "name": "ListInputSecurityGroups", "http": { @@ -1225,7 +1313,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't provide the list of programs. The multiplex that you specified doesn\u2019t exist. Check the ID and try again." + "documentation": "MediaLive can't provide the list of programs. The multiplex that you specified doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -1531,7 +1619,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to start doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to start doesn’t exist. Check the ID and try again." 
}, { "shape": "GatewayTimeoutException", @@ -1631,7 +1719,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to stop doesn\u2019t exist. Check the ID and try again." + "documentation": "The multiplex that you are trying to stop doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -1794,6 +1882,56 @@ ], "documentation": "Updates an input." }, + "UpdateInputDevice": { + "name": "UpdateInputDevice", + "http": { + "method": "PUT", + "requestUri": "/prod/inputDevices/{inputDeviceId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateInputDeviceRequest" + }, + "output": { + "shape": "UpdateInputDeviceResponse", + "documentation": "Input device update is in progress." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "This request was invalid." + }, + { + "shape": "UnprocessableEntityException", + "documentation": "Input device failed validation and could not be created." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Unexpected internal service error." + }, + { + "shape": "ForbiddenException", + "documentation": "You do not have permission to update the input device." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad gateway error." + }, + { + "shape": "NotFoundException", + "documentation": "The input device you're requesting to does not exist. Check the ID." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway timeout error." + }, + { + "shape": "TooManyRequestsException", + "documentation": "Request limit exceeded on update calls to the input device service." + } + ], + "documentation": "Updates the parameters for the input device." + }, "UpdateInputSecurityGroup": { "name": "UpdateInputSecurityGroup", "http": { @@ -1877,7 +2015,7 @@ }, { "shape": "NotFoundException", - "documentation": "The multiplex that you are trying to update doesn\u2019t exist. Check the ID and try again." 
+ "documentation": "The multiplex that you are trying to update doesn’t exist. Check the ID and try again." }, { "shape": "GatewayTimeoutException", @@ -1927,7 +2065,7 @@ }, { "shape": "NotFoundException", - "documentation": "MediaLive can't update the program. The multiplex or the program that you specified doesn\u2019t exist. Check the IDs and try again." + "documentation": "MediaLive can't update the program. The multiplex or the program that you specified doesn’t exist. Check the IDs and try again." }, { "shape": "GatewayTimeoutException", @@ -2550,10 +2688,42 @@ "AudioPidSelection": { "shape": "AudioPidSelection", "locationName": "audioPidSelection" + }, + "AudioTrackSelection": { + "shape": "AudioTrackSelection", + "locationName": "audioTrackSelection" } }, "documentation": "Audio Selector Settings" }, + "AudioTrack": { + "type": "structure", + "members": { + "Track": { + "shape": "__integerMin1", + "locationName": "track", + "documentation": "1-based integer value that maps to a specific audio track" + } + }, + "documentation": "Audio Track", + "required": [ + "Track" + ] + }, + "AudioTrackSelection": { + "type": "structure", + "members": { + "Tracks": { + "shape": "__listOfAudioTrack", + "locationName": "tracks", + "documentation": "Selects one or more unique audio tracks from within an mp4 source." + } + }, + "documentation": "Audio Track Selection", + "required": [ + "Tracks" + ] + }, "AudioType": { "type": "string", "documentation": "Audio Type", @@ -2572,6 +2742,25 @@ "COMMON" ] }, + "AutomaticInputFailoverSettings": { + "type": "structure", + "members": { + "InputPreference": { + "shape": "InputPreference", + "locationName": "inputPreference", + "documentation": "Input preference when deciding which input to make active when a previously failed input has recovered." + }, + "SecondaryInputId": { + "shape": "__string", + "locationName": "secondaryInputId", + "documentation": "The input ID of the secondary input in the automatic input failover pair." 
+ } + }, + "documentation": "The settings for Automatic Input Failover.", + "required": [ + "SecondaryInputId" + ] + }, "AvailBlanking": { "type": "structure", "members": { @@ -3496,6 +3685,11 @@ "locationName": "destinations", "documentation": "Destination settings for PUSH type inputs." }, + "InputDevices": { + "shape": "__listOfInputDeviceSettings", + "locationName": "inputDevices", + "documentation": "Settings for the devices." + }, "InputSecurityGroups": { "shape": "__listOf__string", "locationName": "inputSecurityGroups", @@ -3551,6 +3745,11 @@ "locationName": "destinations", "documentation": "Destination settings for PUSH type inputs." }, + "InputDevices": { + "shape": "__listOfInputDeviceSettings", + "locationName": "inputDevices", + "documentation": "Settings for the devices." + }, "InputSecurityGroups": { "shape": "__listOf__string", "locationName": "inputSecurityGroups", @@ -4353,6 +4552,77 @@ }, "documentation": "Placeholder documentation for DescribeChannelResponse" }, + "DescribeInputDeviceRequest": { + "type": "structure", + "members": { + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of this input device. For example, hd-123456789abcdef." + } + }, + "required": [ + "InputDeviceId" + ], + "documentation": "Placeholder documentation for DescribeInputDeviceRequest" + }, + "DescribeInputDeviceResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The unique ARN of the input device." + }, + "ConnectionState": { + "shape": "InputDeviceConnectionState", + "locationName": "connectionState", + "documentation": "The state of the connection between the input device and AWS." + }, + "DeviceSettingsSyncState": { + "shape": "DeviceSettingsSyncState", + "locationName": "deviceSettingsSyncState", + "documentation": "The status of the action to synchronize the device configuration. 
If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." + }, + "HdDeviceSettings": { + "shape": "InputDeviceHdSettings", + "locationName": "hdDeviceSettings", + "documentation": "Settings that describe an input device that is type HD." + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID of the input device." + }, + "MacAddress": { + "shape": "__string", + "locationName": "macAddress", + "documentation": "The network MAC address of the input device." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "A name that you specify for the input device." + }, + "NetworkSettings": { + "shape": "InputDeviceNetworkSettings", + "locationName": "networkSettings", + "documentation": "The network settings for the input device." + }, + "SerialNumber": { + "shape": "__string", + "locationName": "serialNumber", + "documentation": "The unique serial number of the input device." + }, + "Type": { + "shape": "InputDeviceType", + "locationName": "type", + "documentation": "The type of the input device." + } + }, + "documentation": "Placeholder documentation for DescribeInputDeviceResponse" + }, "DescribeInputRequest": { "type": "structure", "members": { @@ -4396,6 +4666,11 @@ "locationName": "inputClass", "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. 
If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.\n" }, + "InputDevices": { + "shape": "__listOfInputDeviceSettings", + "locationName": "inputDevices", + "documentation": "Settings for the input devices." + }, "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", @@ -4840,6 +5115,14 @@ }, "documentation": "Placeholder documentation for DescribeScheduleResponse" }, + "DeviceSettingsSyncState": { + "type": "string", + "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration.", + "enum": [ + "SYNCED", + "SYNCING" + ] + }, "DvbNitSettings": { "type": "structure", "members": { @@ -5394,6 +5677,11 @@ "locationName": "captionDescriptions", "documentation": "Settings for caption decriptions" }, + "FeatureActivations": { + "shape": "FeatureActivations", + "locationName": "featureActivations", + "documentation": "Feature Activations" + }, "GlobalConfiguration": { "shape": "GlobalConfiguration", "locationName": "globalConfiguration", @@ -5426,6 +5714,25 @@ "TimecodeConfig" ] }, + "FeatureActivations": { + "type": "structure", + "members": { + "InputPrepareScheduleActions": { + "shape": "FeatureActivationsInputPrepareScheduleActions", + "locationName": "inputPrepareScheduleActions", + "documentation": "Enables the Input Prepare feature. You can create Input Prepare actions in the schedule only if this feature is enabled.\nIf you disable the feature on an existing schedule, make sure that you first delete all input prepare actions from the schedule." 
+ } + }, + "documentation": "Feature Activations" + }, + "FeatureActivationsInputPrepareScheduleActions": { + "type": "string", + "documentation": "Feature Activations Input Prepare Schedule Actions", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "FecOutputIncludeFec": { "type": "string", "documentation": "Fec Output Include Fec", @@ -5493,10 +5800,36 @@ "shape": "__string", "locationName": "audioRenditionSets", "documentation": "List all the audio groups that are used with the video output stream. Input all the audio GROUP-IDs that are associated to the video, separate by ','." + }, + "NielsenId3Behavior": { + "shape": "Fmp4NielsenId3Behavior", + "locationName": "nielsenId3Behavior", + "documentation": "If set to passthrough, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output." + }, + "TimedMetadataBehavior": { + "shape": "Fmp4TimedMetadataBehavior", + "locationName": "timedMetadataBehavior", + "documentation": "When set to passthrough, timed metadata is passed through from input to output." } }, "documentation": "Fmp4 Hls Settings" }, + "Fmp4NielsenId3Behavior": { + "type": "string", + "documentation": "Fmp4 Nielsen Id3 Behavior", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, + "Fmp4TimedMetadataBehavior": { + "type": "string", + "documentation": "Fmp4 Timed Metadata Behavior", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, "FollowModeScheduleActionStartSettings": { "type": "structure", "members": { @@ -5626,7 +5959,7 @@ "OutputLockingMode": { "shape": "GlobalConfigurationOutputLockingMode", "locationName": "outputLockingMode", - "documentation": "Indicates how MediaLive pipelines are synchronized.\n\nPIPELINELOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other.\nEPOCHLOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch." 
+ "documentation": "Indicates how MediaLive pipelines are synchronized.\n\nPIPELINE_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the other.\nEPOCH_LOCKING - MediaLive will attempt to synchronize the output of each pipeline to the Unix epoch." }, "OutputTimingSource": { "shape": "GlobalConfigurationOutputTimingSource", @@ -5719,6 +6052,16 @@ "CAVLC" ] }, + "H264FilterSettings": { + "type": "structure", + "members": { + "TemporalFilterSettings": { + "shape": "TemporalFilterSettings", + "locationName": "temporalFilterSettings" + } + }, + "documentation": "H264 Filter Settings" + }, "H264FlickerAq": { "type": "string", "documentation": "H264 Flicker Aq", @@ -5727,6 +6070,14 @@ "ENABLED" ] }, + "H264ForceFieldPictures": { + "type": "string", + "documentation": "H264 Force Field Pictures", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "H264FramerateControl": { "type": "string", "documentation": "H264 Framerate Control", @@ -5803,6 +6154,14 @@ "MAIN" ] }, + "H264QualityLevel": { + "type": "string", + "documentation": "H264 Quality Level", + "enum": [ + "ENHANCED_QUALITY", + "STANDARD_QUALITY" + ] + }, "H264RateControlMode": { "type": "string", "documentation": "H264 Rate Control Mode", @@ -5872,6 +6231,11 @@ "locationName": "entropyEncoding", "documentation": "Entropy encoding mode. Use cabac (must be in Main or High profile) or cavlc." }, + "FilterSettings": { + "shape": "H264FilterSettings", + "locationName": "filterSettings", + "documentation": "Optional filters that you can apply to an encode." + }, "FixedAfd": { "shape": "FixedAfd", "locationName": "fixedAfd", @@ -5882,6 +6246,11 @@ "locationName": "flickerAq", "documentation": "If set to enabled, adjust quantization within each frame to reduce flicker or 'pop' on I-frames." 
}, + "ForceFieldPictures": { + "shape": "H264ForceFieldPictures", + "locationName": "forceFieldPictures", + "documentation": "This setting applies only when scan type is \"interlaced.\" It controls whether coding is performed on a field basis or on a frame basis. (When the video is progressive, the coding is always performed on a frame basis.)\nenabled: Force MediaLive to code on a field basis, so that odd and even sets of fields are coded separately.\ndisabled: Code the two sets of fields separately (on a field basis) or together (on a frame basis using PAFF), depending on what is most appropriate for the content." + }, "FramerateControl": { "shape": "H264FramerateControl", "locationName": "framerateControl", @@ -5967,6 +6336,11 @@ "locationName": "profile", "documentation": "H.264 Profile." }, + "QualityLevel": { + "shape": "H264QualityLevel", + "locationName": "qualityLevel", + "documentation": "Leave as STANDARD_QUALITY or choose a different value (which might result in additional costs to run the channel).\n- ENHANCED_QUALITY: Produces a slightly better video quality without an increase in the bitrate. Has an effect only when the Rate control mode is QVBR or CBR. If this channel is in a MediaLive multiplex, the value must be ENHANCED_QUALITY.\n- STANDARD_QUALITY: Valid for any Rate control mode." + }, "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", @@ -6667,7 +7041,7 @@ "OutputSelection": { "shape": "HlsOutputSelection", "locationName": "outputSelection", - "documentation": "MANIFESTSANDSEGMENTS: Generates manifests (master manifest, if applicable, and media manifests) for this output group.\n\nSEGMENTSONLY: Does not generate any manifests for this output group." 
+ "documentation": "MANIFESTS_AND_SEGMENTS: Generates manifests (master manifest, if applicable, and media manifests) for this output group.\n\nVARIANT_MANIFESTS_AND_SEGMENTS: Generates media manifests for this output group, but not a master manifest.\n\nSEGMENTS_ONLY: Does not generate any manifests for this output group." }, "ProgramDateTime": { "shape": "HlsProgramDateTime", @@ -6722,7 +7096,7 @@ "TsFileMode": { "shape": "HlsTsFileMode", "locationName": "tsFileMode", - "documentation": "SEGMENTEDFILES: Emit the program as segments - multiple .ts media files.\n\nSINGLEFILE: Applies only if Mode field is VOD. Emit the program as a single .ts media file. The media manifest includes #EXT-X-BYTERANGE tags to index segments for playback. A typical use for this value is when sending the output to AWS Elemental MediaConvert, which can accept only a single media file. Playback while the channel is running is not guaranteed due to HTTP server caching." + "documentation": "SEGMENTED_FILES: Emit the program as segments - multiple .ts media files.\n\nSINGLE_FILE: Applies only if Mode field is VOD. Emit the program as a single .ts media file. The media manifest includes #EXT-X-BYTERANGE tags to index segments for playback. A typical use for this value is when sending the output to AWS Elemental MediaConvert, which can accept only a single media file. Playback while the channel is running is not guaranteed due to HTTP server caching." } }, "documentation": "Hls Group Settings", @@ -7033,7 +7407,7 @@ "type": "structure", "members": { }, - "documentation": "Settings to configure an action so that it occurs immediately. This is only supported for input switch actions currently." + "documentation": "Settings to configure an action so that it occurs as soon as possible." }, "Input": { "type": "structure", @@ -7063,6 +7437,11 @@ "locationName": "inputClass", "documentation": "STANDARD - MediaLive expects two sources to be connected to this input. 
If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.\n" }, + "InputDevices": { + "shape": "__listOfInputDeviceSettings", + "locationName": "inputDevices", + "documentation": "Settings for the input devices." + }, "InputSourceType": { "shape": "InputSourceType", "locationName": "inputSourceType", @@ -7112,6 +7491,11 @@ "InputAttachment": { "type": "structure", "members": { + "AutomaticInputFailoverSettings": { + "shape": "AutomaticInputFailoverSettings", + "locationName": "automaticInputFailoverSettings", + "documentation": "User-specified settings for defining what the conditions are for declaring the input unhealthy and failing over to a different input." + }, "InputAttachmentName": { "shape": "__string", "locationName": "inputAttachmentName", @@ -7259,32 +7643,331 @@ }, "documentation": "The properties for a VPC type input destination." }, - "InputFilter": { - "type": "string", - "documentation": "Input Filter", - "enum": [ - "AUTO", - "DISABLED", - "FORCED" - ] - }, - "InputLocation": { + "InputDevice": { "type": "structure", "members": { - "PasswordParam": { + "Arn": { "shape": "__string", - "locationName": "passwordParam", - "documentation": "key used to extract the password from EC2 Parameter store" + "locationName": "arn", + "documentation": "The unique ARN of the input device." }, - "Uri": { + "ConnectionState": { + "shape": "InputDeviceConnectionState", + "locationName": "connectionState", + "documentation": "The state of the connection between the input device and AWS." 
+ }, + "DeviceSettingsSyncState": { + "shape": "DeviceSettingsSyncState", + "locationName": "deviceSettingsSyncState", + "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." + }, + "HdDeviceSettings": { + "shape": "InputDeviceHdSettings", + "locationName": "hdDeviceSettings", + "documentation": "Settings that describe an input device that is type HD." + }, + "Id": { "shape": "__string", - "locationName": "uri", - "documentation": "Uniform Resource Identifier - This should be a path to a file accessible to the Live system (eg. a http:// URI) depending on the output type. For example, a RTMP destination should have a uri simliar to: \"rtmp://fmsserver/live\"." + "locationName": "id", + "documentation": "The unique ID of the input device." }, - "Username": { + "MacAddress": { "shape": "__string", - "locationName": "username", - "documentation": "Documentation update needed" + "locationName": "macAddress", + "documentation": "The network MAC address of the input device." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "A name that you specify for the input device." + }, + "NetworkSettings": { + "shape": "InputDeviceNetworkSettings", + "locationName": "networkSettings", + "documentation": "The network settings for the input device." + }, + "SerialNumber": { + "shape": "__string", + "locationName": "serialNumber", + "documentation": "The unique serial number of the input device." + }, + "Type": { + "shape": "InputDeviceType", + "locationName": "type", + "documentation": "The type of the input device." + } + }, + "documentation": "An input device." 
+ }, + "InputDeviceActiveInput": { + "type": "string", + "documentation": "The source at the input device that is currently active.", + "enum": [ + "HDMI", + "SDI" + ] + }, + "InputDeviceConfigurableSettings": { + "type": "structure", + "members": { + "ConfiguredInput": { + "shape": "InputDeviceConfiguredInput", + "locationName": "configuredInput", + "documentation": "The input source that you want to use. If the device has a source connected to only one of its input ports, or if you don't care which source the device sends, specify Auto. If the device has sources connected to both its input ports, and you want to use a specific source, specify the source." + }, + "MaxBitrate": { + "shape": "__integer", + "locationName": "maxBitrate", + "documentation": "The maximum bitrate in bits per second. Set a value here to throttle the bitrate of the source video." + } + }, + "documentation": "Configurable settings for the input device." + }, + "InputDeviceConfigurationValidationError": { + "type": "structure", + "members": { + "Message": { + "shape": "__string", + "locationName": "message", + "documentation": "The error message." + }, + "ValidationErrors": { + "shape": "__listOfValidationError", + "locationName": "validationErrors", + "documentation": "A collection of validation error responses." 
+ } + }, + "documentation": "Placeholder documentation for InputDeviceConfigurationValidationError" + }, + "InputDeviceConfiguredInput": { + "type": "string", + "documentation": "The source to activate (use) from the input device.", + "enum": [ + "AUTO", + "HDMI", + "SDI" + ] + }, + "InputDeviceConnectionState": { + "type": "string", + "documentation": "The state of the connection between the input device and AWS.", + "enum": [ + "DISCONNECTED", + "CONNECTED" + ] + }, + "InputDeviceHdSettings": { + "type": "structure", + "members": { + "ActiveInput": { + "shape": "InputDeviceActiveInput", + "locationName": "activeInput", + "documentation": "If you specified Auto as the configured input, specifies which of the sources is currently active (SDI or HDMI)." + }, + "ConfiguredInput": { + "shape": "InputDeviceConfiguredInput", + "locationName": "configuredInput", + "documentation": "The source at the input device that is currently active. You can specify this source." + }, + "DeviceState": { + "shape": "InputDeviceState", + "locationName": "deviceState", + "documentation": "The state of the input device." + }, + "Framerate": { + "shape": "__double", + "locationName": "framerate", + "documentation": "The frame rate of the video source." + }, + "Height": { + "shape": "__integer", + "locationName": "height", + "documentation": "The height of the video source, in pixels." + }, + "MaxBitrate": { + "shape": "__integer", + "locationName": "maxBitrate", + "documentation": "The current maximum bitrate for ingesting this source, in bits per second. You can specify this maximum." + }, + "ScanType": { + "shape": "InputDeviceScanType", + "locationName": "scanType", + "documentation": "The scan type of the video source." + }, + "Width": { + "shape": "__integer", + "locationName": "width", + "documentation": "The width of the video source, in pixels." 
+ } + }, + "documentation": "Settings that describe the active source from the input device, and the video characteristics of that source." + }, + "InputDeviceIpScheme": { + "type": "string", + "documentation": "Specifies whether the input device has been configured (outside of MediaLive) to use a dynamic IP address assignment (DHCP) or a static IP address.", + "enum": [ + "STATIC", + "DHCP" + ] + }, + "InputDeviceNetworkSettings": { + "type": "structure", + "members": { + "DnsAddresses": { + "shape": "__listOf__string", + "locationName": "dnsAddresses", + "documentation": "The DNS addresses of the input device." + }, + "Gateway": { + "shape": "__string", + "locationName": "gateway", + "documentation": "The network gateway IP address." + }, + "IpAddress": { + "shape": "__string", + "locationName": "ipAddress", + "documentation": "The IP address of the input device." + }, + "IpScheme": { + "shape": "InputDeviceIpScheme", + "locationName": "ipScheme", + "documentation": "Specifies whether the input device has been configured (outside of MediaLive) to use a dynamic IP address assignment (DHCP) or a static IP address." + }, + "SubnetMask": { + "shape": "__string", + "locationName": "subnetMask", + "documentation": "The subnet mask of the input device." + } + }, + "documentation": "The network settings for the input device." + }, + "InputDeviceRequest": { + "type": "structure", + "members": { + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID for the device." + } + }, + "documentation": "Settings for an input device." + }, + "InputDeviceScanType": { + "type": "string", + "documentation": "The scan type of the video source.", + "enum": [ + "INTERLACED", + "PROGRESSIVE" + ] + }, + "InputDeviceSettings": { + "type": "structure", + "members": { + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID for the device." + } + }, + "documentation": "Settings for an input device." 
+ }, + "InputDeviceState": { + "type": "string", + "documentation": "The state of the input device.", + "enum": [ + "IDLE", + "STREAMING" + ] + }, + "InputDeviceSummary": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The unique ARN of the input device." + }, + "ConnectionState": { + "shape": "InputDeviceConnectionState", + "locationName": "connectionState", + "documentation": "The state of the connection between the input device and AWS." + }, + "DeviceSettingsSyncState": { + "shape": "DeviceSettingsSyncState", + "locationName": "deviceSettingsSyncState", + "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." + }, + "HdDeviceSettings": { + "shape": "InputDeviceHdSettings", + "locationName": "hdDeviceSettings", + "documentation": "Settings that describe an input device that is type HD." + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID of the input device." + }, + "MacAddress": { + "shape": "__string", + "locationName": "macAddress", + "documentation": "The network MAC address of the input device." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "A name that you specify for the input device." + }, + "NetworkSettings": { + "shape": "InputDeviceNetworkSettings", + "locationName": "networkSettings", + "documentation": "Network settings for the input device." + }, + "SerialNumber": { + "shape": "__string", + "locationName": "serialNumber", + "documentation": "The unique serial number of the input device." 
+ }, + "Type": { + "shape": "InputDeviceType", + "locationName": "type", + "documentation": "The type of the input device." + } + }, + "documentation": "Details of the input device." + }, + "InputDeviceType": { + "type": "string", + "documentation": "The type of the input device. For an AWS Elemental Link device that outputs resolutions up to 1080, choose \"HD\".", + "enum": [ + "HD" + ] + }, + "InputFilter": { + "type": "string", + "documentation": "Input Filter", + "enum": [ + "AUTO", + "DISABLED", + "FORCED" + ] + }, + "InputLocation": { + "type": "structure", + "members": { + "PasswordParam": { + "shape": "__string", + "locationName": "passwordParam", + "documentation": "key used to extract the password from EC2 Parameter store" + }, + "Uri": { + "shape": "__string", + "locationName": "uri", + "documentation": "Uniform Resource Identifier - This should be a path to a file accessible to the Live system (eg. a http:// URI) depending on the output type. For example, a RTMP destination should have a uri simliar to: \"rtmp://fmsserver/live\"." + }, + "Username": { + "shape": "__string", + "locationName": "username", + "documentation": "Documentation update needed" } }, "documentation": "Input Location", @@ -7373,6 +8056,38 @@ "MAX_50_MBPS" ] }, + "InputPreference": { + "type": "string", + "documentation": "Input preference when deciding which input to make active when a previously failed input has recovered.\nIf \\\"EQUAL_INPUT_PREFERENCE\\\", then the active input will stay active as long as it is healthy.\nIf \\\"PRIMARY_INPUT_PREFERRED\\\", then always switch back to the primary input when it is healthy.\n", + "enum": [ + "EQUAL_INPUT_PREFERENCE", + "PRIMARY_INPUT_PREFERRED" + ] + }, + "InputPrepareScheduleActionSettings": { + "type": "structure", + "members": { + "InputAttachmentNameReference": { + "shape": "__string", + "locationName": "inputAttachmentNameReference", + "documentation": "The name of the input attachment that should be prepared by this action. 
If no name is provided, the action will stop the most recent prepare (if any) when activated." + }, + "InputClippingSettings": { + "shape": "InputClippingSettings", + "locationName": "inputClippingSettings", + "documentation": "Settings to let you create a clip of the file input, in order to set up the input to ingest only a portion of the file." + }, + "UrlPath": { + "shape": "__listOf__string", + "locationName": "urlPath", + "documentation": "The value for the variable portion of the URL for the dynamic input, for this instance of the input. Each time you use the same dynamic input in an input switch action, you can provide a different value, in order to connect the input to a different content source." + } + }, + "documentation": "Action to prepare an input for a future immediate input switch.", + "required": [ + "InputAttachmentNameReference" + ] + }, "InputResolution": { "type": "string", "documentation": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines\n", @@ -7482,6 +8197,11 @@ "locationName": "networkInputSettings", "documentation": "Input settings." }, + "Smpte2038DataPreference": { + "shape": "Smpte2038DataPreference", + "locationName": "smpte2038DataPreference", + "documentation": "Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in this input. Applicable data types are captions, timecode, AFD, and SCTE-104 messages.\n- PREFER: Extract from SMPTE-2038 if present in this input, otherwise extract from another source (if any).\n- IGNORE: Never extract any ancillary data from SMPTE-2038." 
+ }, "SourceEndBehavior": { "shape": "InputSourceEndBehavior", "locationName": "sourceEndBehavior", @@ -7626,7 +8346,8 @@ "RTMP_PULL", "URL_PULL", "MP4_FILE", - "MEDIACONNECT" + "MEDIACONNECT", + "INPUT_DEVICE" ], "documentation": "Placeholder documentation for InputType" }, @@ -7777,6 +8498,54 @@ }, "documentation": "Placeholder documentation for ListChannelsResultModel" }, + "ListInputDevicesRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + } + }, + "documentation": "Placeholder documentation for ListInputDevicesRequest" + }, + "ListInputDevicesResponse": { + "type": "structure", + "members": { + "InputDevices": { + "shape": "__listOfInputDeviceSummary", + "locationName": "inputDevices", + "documentation": "The list of input devices." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token to get additional list results." + } + }, + "documentation": "Placeholder documentation for ListInputDevicesResponse" + }, + "ListInputDevicesResultModel": { + "type": "structure", + "members": { + "InputDevices": { + "shape": "__listOfInputDeviceSummary", + "locationName": "inputDevices", + "documentation": "The list of input devices." + }, + "NextToken": { + "shape": "__string", + "locationName": "nextToken", + "documentation": "A token to get additional list results." + } + }, + "documentation": "The list of input devices owned by the AWS account." + }, "ListInputSecurityGroupsRequest": { "type": "structure", "members": { @@ -8579,7 +9348,7 @@ "SegmentationTime": { "shape": "__doubleMin1", "locationName": "segmentationTime", - "documentation": "The length in seconds of each segment. Required unless markers is set to None_." + "documentation": "The length in seconds of each segment. 
Required unless markers is set to _none_." }, "TimedMetadataBehavior": { "shape": "M2tsTimedMetadataBehavior", @@ -8829,7 +9598,7 @@ "AcquisitionPointId": { "shape": "__string", "locationName": "acquisitionPointId", - "documentation": "The value of the \"Acquisition Point Identity\" element used in each message placed in the sparse track. Only enabled if sparseTrackType is not \"none\"." + "documentation": "The ID to include in each message in the sparse track. Ignored if sparseTrackType is NONE." }, "AudioOnlyTimecodeControl": { "shape": "SmoothGroupAudioOnlyTimecodeControl", @@ -8904,7 +9673,7 @@ "SparseTrackType": { "shape": "SmoothGroupSparseTrackType", "locationName": "sparseTrackType", - "documentation": "If set to scte35, use incoming SCTE-35 messages to generate a sparse track in this group of MS-Smooth outputs." + "documentation": "Identifies the type of data to place in the sparse track:\n- SCTE35: Insert SCTE-35 messages from the source content. With each message, insert an IDR frame to start a new segment.\n- SCTE35_WITHOUT_SEGMENTATION: Insert SCTE-35 messages from the source content. With each message, insert an IDR frame but don't start a new segment.\n- NONE: Don't generate a sparse track for any outputs in this output group." }, "StreamManifestBehavior": { "shape": "SmoothGroupStreamManifestBehavior", @@ -9016,7 +9785,7 @@ "documentation": "The error message." }, "ValidationErrors": { - "shape": "__listOfMultiplexValidationError", + "shape": "__listOfValidationError", "locationName": "validationErrors", "documentation": "A collection of validation error responses." } @@ -9353,22 +10122,6 @@ }, "documentation": "Placeholder documentation for MultiplexSummary" }, - "MultiplexValidationError": { - "type": "structure", - "members": { - "ElementPath": { - "shape": "__string", - "locationName": "elementPath", - "documentation": "Path to the source of the error." 
- }, - "ErrorMessage": { - "shape": "__string", - "locationName": "errorMessage", - "documentation": "The error message." - } - }, - "documentation": "Placeholder documentation for MultiplexValidationError" - }, "MultiplexVideoSettings": { "type": "structure", "members": { @@ -10290,6 +11043,11 @@ "locationName": "hlsTimedMetadataSettings", "documentation": "Action to insert HLS metadata" }, + "InputPrepareSettings": { + "shape": "InputPrepareScheduleActionSettings", + "locationName": "inputPrepareSettings", + "documentation": "Action to prepare an input for a future immediate input switch" + }, "InputSwitchSettings": { "shape": "InputSwitchScheduleActionSettings", "locationName": "inputSwitchSettings", @@ -10755,7 +11513,8 @@ "documentation": "Smooth Group Sparse Track Type", "enum": [ "NONE", - "SCTE_35" + "SCTE_35", + "SCTE_35_WITHOUT_SEGMENTATION" ] }, "SmoothGroupStreamManifestBehavior": { @@ -10774,6 +11533,14 @@ "USE_EVENT_START_DATE" ] }, + "Smpte2038DataPreference": { + "type": "string", + "documentation": "Smpte2038 Data Preference", + "enum": [ + "IGNORE", + "PREFER" + ] + }, "SmpteTtDestinationSettings": { "type": "structure", "members": { @@ -11284,6 +12051,54 @@ }, "documentation": "Teletext Source Settings" }, + "TemporalFilterPostFilterSharpening": { + "type": "string", + "documentation": "Temporal Filter Post Filter Sharpening", + "enum": [ + "AUTO", + "DISABLED", + "ENABLED" + ] + }, + "TemporalFilterSettings": { + "type": "structure", + "members": { + "PostFilterSharpening": { + "shape": "TemporalFilterPostFilterSharpening", + "locationName": "postFilterSharpening", + "documentation": "If you enable this filter, the results are the following:\n- If the source content is noisy (it contains excessive digital artifacts), the filter cleans up the source.\n- If the source content is already clean, the filter tends to decrease the bitrate, especially when the rate control mode is QVBR." 
+ }, + "Strength": { + "shape": "TemporalFilterStrength", + "locationName": "strength", + "documentation": "Choose a filter strength. We recommend a strength of 1 or 2. A higher strength might take out good information, resulting in an image that is overly soft." + } + }, + "documentation": "Temporal Filter Settings" + }, + "TemporalFilterStrength": { + "type": "string", + "documentation": "Temporal Filter Strength", + "enum": [ + "AUTO", + "STRENGTH_1", + "STRENGTH_2", + "STRENGTH_3", + "STRENGTH_4", + "STRENGTH_5", + "STRENGTH_6", + "STRENGTH_7", + "STRENGTH_8", + "STRENGTH_9", + "STRENGTH_10", + "STRENGTH_11", + "STRENGTH_12", + "STRENGTH_13", + "STRENGTH_14", + "STRENGTH_15", + "STRENGTH_16" + ] + }, "TimecodeConfig": { "type": "structure", "members": { @@ -11419,7 +12234,8 @@ "members": { "Message": { "shape": "__string", - "locationName": "message" + "locationName": "message", + "documentation": "The error message." }, "ValidationErrors": { "shape": "__listOfValidationError", @@ -11605,6 +12421,11 @@ "locationName": "destinations", "documentation": "Destination settings for PUSH type inputs." }, + "InputDevices": { + "shape": "__listOfInputDeviceRequest", + "locationName": "inputDevices", + "documentation": "Settings for the devices." + }, "InputSecurityGroups": { "shape": "__listOf__string", "locationName": "inputSecurityGroups", @@ -11633,6 +12454,103 @@ }, "documentation": "Placeholder documentation for UpdateInput" }, + "UpdateInputDevice": { + "type": "structure", + "members": { + "HdDeviceSettings": { + "shape": "InputDeviceConfigurableSettings", + "locationName": "hdDeviceSettings", + "documentation": "The settings that you want to apply to the input device." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "The name that you assigned to this input device (not the unique ID)." + } + }, + "documentation": "Updates an input device." 
+ }, + "UpdateInputDeviceRequest": { + "type": "structure", + "members": { + "HdDeviceSettings": { + "shape": "InputDeviceConfigurableSettings", + "locationName": "hdDeviceSettings", + "documentation": "The settings that you want to apply to the input device." + }, + "InputDeviceId": { + "shape": "__string", + "location": "uri", + "locationName": "inputDeviceId", + "documentation": "The unique ID of the input device. For example, hd-123456789abcdef." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "The name that you assigned to this input device (not the unique ID)." + } + }, + "documentation": "A request to update an input device.", + "required": [ + "InputDeviceId" + ] + }, + "UpdateInputDeviceResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn", + "documentation": "The unique ARN of the input device." + }, + "ConnectionState": { + "shape": "InputDeviceConnectionState", + "locationName": "connectionState", + "documentation": "The state of the connection between the input device and AWS." + }, + "DeviceSettingsSyncState": { + "shape": "DeviceSettingsSyncState", + "locationName": "deviceSettingsSyncState", + "documentation": "The status of the action to synchronize the device configuration. If you change the configuration of the input device (for example, the maximum bitrate), MediaLive sends the new data to the device. The device might not update itself immediately. SYNCED means the device has updated its configuration. SYNCING means that it has not updated its configuration." + }, + "HdDeviceSettings": { + "shape": "InputDeviceHdSettings", + "locationName": "hdDeviceSettings", + "documentation": "Settings that describe an input device that is type HD." + }, + "Id": { + "shape": "__string", + "locationName": "id", + "documentation": "The unique ID of the input device." 
+ }, + "MacAddress": { + "shape": "__string", + "locationName": "macAddress", + "documentation": "The network MAC address of the input device." + }, + "Name": { + "shape": "__string", + "locationName": "name", + "documentation": "A name that you specify for the input device." + }, + "NetworkSettings": { + "shape": "InputDeviceNetworkSettings", + "locationName": "networkSettings", + "documentation": "The network settings for the input device." + }, + "SerialNumber": { + "shape": "__string", + "locationName": "serialNumber", + "documentation": "The unique serial number of the input device." + }, + "Type": { + "shape": "InputDeviceType", + "locationName": "type", + "documentation": "The type of the input device." + } + }, + "documentation": "Placeholder documentation for UpdateInputDeviceResponse" + }, "UpdateInputRequest": { "type": "structure", "members": { @@ -11641,6 +12559,11 @@ "locationName": "destinations", "documentation": "Destination settings for PUSH type inputs." }, + "InputDevices": { + "shape": "__listOfInputDeviceRequest", + "locationName": "inputDevices", + "documentation": "Settings for the devices." + }, "InputId": { "shape": "__string", "location": "uri", @@ -11922,11 +12845,13 @@ "members": { "ElementPath": { "shape": "__string", - "locationName": "elementPath" + "locationName": "elementPath", + "documentation": "Path to the source of the error." }, "ErrorMessage": { "shape": "__string", - "locationName": "errorMessage" + "locationName": "errorMessage", + "documentation": "The error message." } }, "documentation": "Placeholder documentation for ValidationError" @@ -11970,12 +12895,12 @@ "RespondToAfd": { "shape": "VideoDescriptionRespondToAfd", "locationName": "respondToAfd", - "documentation": "Indicates how to respond to the AFD values in the input stream. 
RESPOND causes input video to be clipped, depending on the AFD value, input display aspect ratio, and output display aspect ratio, and (except for FRAMECAPTURE codec) includes the values in the output. PASSTHROUGH (does not apply to FRAMECAPTURE codec) ignores the AFD values and includes the values in the output, so input video is not clipped. NONE ignores the AFD values and does not include the values through to the output, so input video is not clipped." + "documentation": "Indicates how to respond to the AFD values in the input stream. RESPOND causes input video to be clipped, depending on the AFD value, input display aspect ratio, and output display aspect ratio, and (except for FRAME_CAPTURE codec) includes the values in the output. PASSTHROUGH (does not apply to FRAME_CAPTURE codec) ignores the AFD values and includes the values in the output, so input video is not clipped. NONE ignores the AFD values and does not include the values through to the output, so input video is not clipped." }, "ScalingBehavior": { "shape": "VideoDescriptionScalingBehavior", "locationName": "scalingBehavior", - "documentation": "STRETCHTOOUTPUT configures the output position to stretch the video to the specified output resolution (height and width). This option will override any position value. DEFAULT may insert black boxes (pillar boxes or letter boxes) around the video to provide the specified output resolution." + "documentation": "STRETCH_TO_OUTPUT configures the output position to stretch the video to the specified output resolution (height and width). This option will override any position value. DEFAULT may insert black boxes (pillar boxes or letter boxes) around the video to provide the specified output resolution." 
}, "Sharpness": { "shape": "__integerMin0Max100", @@ -12418,6 +13343,13 @@ }, "documentation": "Placeholder documentation for __listOfAudioSelector" }, + "__listOfAudioTrack": { + "type": "list", + "member": { + "shape": "AudioTrack" + }, + "documentation": "Placeholder documentation for __listOfAudioTrack" + }, "__listOfCaptionDescription": { "type": "list", "member": { @@ -12495,6 +13427,27 @@ }, "documentation": "Placeholder documentation for __listOfInputDestinationRequest" }, + "__listOfInputDeviceRequest": { + "type": "list", + "member": { + "shape": "InputDeviceRequest" + }, + "documentation": "Placeholder documentation for __listOfInputDeviceRequest" + }, + "__listOfInputDeviceSettings": { + "type": "list", + "member": { + "shape": "InputDeviceSettings" + }, + "documentation": "Placeholder documentation for __listOfInputDeviceSettings" + }, + "__listOfInputDeviceSummary": { + "type": "list", + "member": { + "shape": "InputDeviceSummary" + }, + "documentation": "Placeholder documentation for __listOfInputDeviceSummary" + }, "__listOfInputSecurityGroup": { "type": "list", "member": { @@ -12572,13 +13525,6 @@ }, "documentation": "Placeholder documentation for __listOfMultiplexSummary" }, - "__listOfMultiplexValidationError": { - "type": "list", - "member": { - "shape": "MultiplexValidationError" - }, - "documentation": "Placeholder documentation for __listOfMultiplexValidationError" - }, "__listOfOffering": { "type": "list", "member": { diff --git a/services/medialive/src/main/resources/codegen-resources/waiters-2.json b/services/medialive/src/main/resources/codegen-resources/waiters-2.json index 6b9f74cbce8e..c0e618d7b056 100644 --- a/services/medialive/src/main/resources/codegen-resources/waiters-2.json +++ b/services/medialive/src/main/resources/codegen-resources/waiters-2.json @@ -61,7 +61,7 @@ "description": "Wait until a channel has is stopped", "operation": "DescribeChannel", "delay": 5, - "maxAttempts": 28, + "maxAttempts": 60, "acceptors": [ { 
"state": "success", @@ -86,6 +86,87 @@ "description": "Wait until a channel has been deleted", "operation": "DescribeChannel", "delay": 5, + "maxAttempts": 84, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "State", + "expected": "DELETED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "State", + "expected": "DELETING" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "InputAttached": { + "description": "Wait until an input has been attached", + "operation": "DescribeInput", + "delay": 5, + "maxAttempts": 20, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "State", + "expected": "ATTACHED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "State", + "expected": "DETACHED" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "InputDetached": { + "description": "Wait until an input has been detached", + "operation": "DescribeInput", + "delay": 5, + "maxAttempts": 84, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "State", + "expected": "DETACHED" + }, + { + "state": "retry", + "matcher": "path", + "argument": "State", + "expected": "CREATING" + }, + { + "state": "retry", + "matcher": "path", + "argument": "State", + "expected": "ATTACHED" + }, + { + "state": "retry", + "matcher": "status", + "expected": 500 + } + ] + }, + "InputDeleted": { + "description": "Wait until an input has been deleted", + "operation": "DescribeInput", + "delay": 5, "maxAttempts": 20, "acceptors": [ { diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index befd700c0b74..2a9d2e83cf9d 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index ac8c72942949..58616b0195dd 100644 
--- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediapackagevod/src/main/resources/codegen-resources/service-2.json b/services/mediapackagevod/src/main/resources/codegen-resources/service-2.json index 784753939cbd..70c31d3c672b 100644 --- a/services/mediapackagevod/src/main/resources/codegen-resources/service-2.json +++ b/services/mediapackagevod/src/main/resources/codegen-resources/service-2.json @@ -444,6 +444,85 @@ "documentation": "A collection of MediaPackage VOD PackagingGroup resources.", "shape": "ListPackagingGroupsResponse" } + }, + "ListTagsForResource": { + "documentation": "Returns a list of the tags assigned to the specified resource.", + "errors": [], + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "name": "ListTagsForResource", + "output": { + "documentation": "200 response", + "shape": "ListTagsForResourceResponse" + } + }, + "TagResource": { + "documentation": "Adds tags to the specified resource. You can specify one or more tags to add.", + "errors": [], + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "name": "TagResource" + }, + "UntagResource": { + "documentation": "Removes tags from the specified resource. You can specify one or more tags to remove.", + "errors": [], + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "name": "UntagResource" + }, + "UpdatePackagingGroup": { + "documentation": "Updates a specific packaging group. 
You can't change the id attribute or any other system-generated attributes.", + "errors": [ + { + "shape": "UnprocessableEntityException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "TooManyRequestsException" + } + ], + "http": { + "method": "PUT", + "requestUri": "/packaging_groups/{id}", + "responseCode": 200 + }, + "input": { + "shape": "UpdatePackagingGroupRequest" + }, + "name": "UpdatePackagingGroup", + "output": { + "documentation": "The updated MediaPackage VOD PackagingGroup resource.", + "shape": "UpdatePackagingGroupResponse" + } } }, "shapes": { @@ -497,6 +576,10 @@ "documentation": "The IAM role_arn used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -528,6 +611,10 @@ "documentation": "The IAM role ARN used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -591,8 +678,32 @@ "documentation": "The IAM role ARN used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" + } + }, + "type": "structure" + }, + "Authorization": { + "documentation": "CDN Authorization credentials", + "members": { + "CdnIdentifierSecret": { + "documentation": "The Amazon Resource Name (ARN) for the secret in AWS Secrets Manager that is used for CDN authorization.", + "locationName": "cdnIdentifierSecret", + "shape": "__string" + }, + "SecretsRoleArn": { + "documentation": "The Amazon Resource Name (ARN) for the IAM role that allows MediaPackage to communicate with AWS Secrets Manager.", + "locationName": "secretsRoleArn", + "shape": "__string" } }, + "required": [ + "SecretsRoleArn", + 
"CdnIdentifierSecret" + ], "type": "structure" }, "CmafEncryption": { @@ -658,6 +769,10 @@ "documentation": "The IAM role ARN used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -709,6 +824,10 @@ "documentation": "The IAM role_arn used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -741,6 +860,10 @@ "documentation": "The ID of a PackagingGroup.", "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -781,6 +904,10 @@ "documentation": "The ID of a PackagingGroup.", "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -788,10 +915,18 @@ "CreatePackagingGroupRequest": { "documentation": "A new MediaPackage VOD PackagingGroup resource configuration.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -806,6 +941,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -815,6 +954,10 @@ "documentation": "The ID of the PackagingGroup.", "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1004,6 +1147,10 @@ "documentation": "The IAM role_arn used to access the source S3 bucket.", "locationName": "sourceRoleArn", "shape": 
"__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1054,6 +1201,10 @@ "documentation": "The ID of a PackagingGroup.", "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1079,6 +1230,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -1088,6 +1243,10 @@ "documentation": "The ID of the PackagingGroup.", "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1337,6 +1496,30 @@ }, "type": "structure" }, + "ListTagsForResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. You can get this from the response to any request to the resource.", + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + } + }, + "required": [ + "ResourceArn" + ], + "type": "structure" + }, + "ListTagsForResourceResponse": { + "members": { + "Tags": { + "documentation": "A collection of tags associated with a resource", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "type": "structure" + }, "ManifestLayout": { "enum": [ "FULL", @@ -1447,6 +1630,10 @@ "documentation": "The ID of a PackagingGroup.", "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1479,6 +1666,10 @@ "documentation": "The ID of a PackagingGroup.", "locationName": "packagingGroupId", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -1511,6 +1702,10 @@ "locationName": "arn", "shape": "__string" }, + "Authorization": { + "locationName": 
"authorization", + "shape": "Authorization" + }, "DomainName": { "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", "locationName": "domainName", @@ -1520,6 +1715,10 @@ "documentation": "The ID of the PackagingGroup.", "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "type": "structure" @@ -1527,10 +1726,18 @@ "PackagingGroupCreateParameters": { "documentation": "Parameters used to create a new MediaPackage VOD PackagingGroup resource.", "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, "Id": { "documentation": "The ID of the PackagingGroup.", "locationName": "id", "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" } }, "required": [ @@ -1554,6 +1761,16 @@ }, "type": "structure" }, + "PackagingGroupUpdateParameters": { + "documentation": "Parameters used to update a MediaPackage packaging group.", + "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + } + }, + "type": "structure" + }, "Profile": { "enum": [ "NONE", @@ -1638,6 +1855,49 @@ }, "type": "structure" }, + "TagResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. 
You can get this from the response to any request to the resource.", + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + }, + "Tags": { + "documentation": "A collection of tags associated with a resource", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "ResourceArn", + "Tags" + ], + "type": "structure" + }, + "Tags": { + "documentation": "A collection of tags associated with a resource", + "key": { + "shape": "__string" + }, + "type": "map", + "value": { + "shape": "__string" + } + }, + "TagsModel": { + "members": { + "Tags": { + "documentation": "A collection of tags associated with a resource", + "locationName": "tags", + "shape": "__mapOf__string" + } + }, + "required": [ + "Tags" + ], + "type": "structure" + }, "TooManyRequestsException": { "documentation": "The client has exceeded their resource or throttling limits.", "error": { @@ -1666,6 +1926,74 @@ }, "type": "structure" }, + "UntagResourceRequest": { + "members": { + "ResourceArn": { + "documentation": "The Amazon Resource Name (ARN) for the resource. 
You can get this from the response to any request to the resource.", + "location": "uri", + "locationName": "resource-arn", + "shape": "__string" + }, + "TagKeys": { + "documentation": "A comma-separated list of the tag keys to remove from the resource.", + "location": "querystring", + "locationName": "tagKeys", + "shape": "__listOf__string" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ], + "type": "structure" + }, + "UpdatePackagingGroupRequest": { + "documentation": "A MediaPackage VOD PackagingGroup resource configuration.", + "members": { + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, + "Id": { + "documentation": "The ID of a MediaPackage VOD PackagingGroup resource.", + "location": "uri", + "locationName": "id", + "shape": "__string" + } + }, + "required": [ + "Id" + ], + "type": "structure" + }, + "UpdatePackagingGroupResponse": { + "members": { + "Arn": { + "documentation": "The ARN of the PackagingGroup.", + "locationName": "arn", + "shape": "__string" + }, + "Authorization": { + "locationName": "authorization", + "shape": "Authorization" + }, + "DomainName": { + "documentation": "The fully qualified domain name for Assets in the PackagingGroup.", + "locationName": "domainName", + "shape": "__string" + }, + "Id": { + "documentation": "The ID of the PackagingGroup.", + "locationName": "id", + "shape": "__string" + }, + "Tags": { + "locationName": "tags", + "shape": "Tags" + } + }, + "type": "structure" + }, "__PeriodTriggersElement": { "enum": [ "ADS" @@ -1738,6 +2066,15 @@ "__long": { "type": "long" }, + "__mapOf__string": { + "key": { + "shape": "__string" + }, + "type": "map", + "value": { + "shape": "__string" + } + }, "__string": { "type": "string" } diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index ba369456c7a1..733d32610722 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 
2.13.56-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index e39f55f653b2..c99172d83204 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 16f67b9ec50d..2ced5ca88fe7 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 69d38d5e7f32..5c0ddda05cdd 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -141,6 +141,20 @@ } }, "shapes": { + "AvailSuppression": { + "type": "structure", + "documentation" : "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

", + "members": { + "Mode": { + "documentation": "Sets the mode for avail suppression, also known as ad suppression. By default, ad suppression is off and all ad breaks are filled by MediaTailor with ads or slate.", + "shape": "Mode" + }, + "Value": { + "documentation": "The avail suppression value is a live edge offset time in HH:MM:SS. MediaTailor won't fill ad breaks on or behind this time in the manifest lookback window. ", + "shape": "__string" + } + } + }, "BadRequestException": { "documentation": "

Invalid request parameters.

", "error": { @@ -154,6 +168,20 @@ }, "type": "structure" }, + "Bumper": { + "type": "structure", + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

", + "members": { + "EndUrl": { + "documentation": "

The URL for the end bumper asset.

", + "shape": "__string" + }, + "StartUrl": { + "documentation": "

The URL for the start bumper asset.

", + "shape": "__string" + } + } + }, "CdnConfiguration": { "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", "members": { @@ -242,6 +270,14 @@ "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", "shape": "__string" }, + "AvailSuppression": { + "shape": "AvailSuppression", + "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" + }, + "Bumper": { + "shape": "Bumper", + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" + }, "CdnConfiguration": { "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", "shape": "CdnConfiguration" @@ -249,7 +285,7 @@ "DashConfiguration": { "documentation": "

The configuration for DASH content.

", "shape": "DashConfiguration" - }, + }, "HlsConfiguration": { "documentation": "

The configuration for HLS content.

", "shape": "HlsConfiguration" @@ -368,14 +404,29 @@ "MULTI_PERIOD" ], "type": "string" - }, + }, + "Mode": { + "enum": [ + "OFF", + "BEHIND_LIVE_EDGE" + ], + "type": "string" + }, "PlaybackConfiguration": { "documentation": "

The AWS MediaTailor configuration.

", "members": { "AdDecisionServerUrl": { "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", "shape": "__string" - }, + }, + "AvailSuppression":{ + "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

", + "shape": "AvailSuppression" + }, + "Bumper": { + "shape": "Bumper", + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" + }, "CdnConfiguration": { "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", "shape": "CdnConfiguration" @@ -447,7 +498,15 @@ "AdDecisionServerUrl": { "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", "shape": "__string" - }, + }, + "AvailSuppression" : { + "shape" : "AvailSuppression", + "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" + }, + "Bumper": { + "shape": "Bumper", + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" + }, "CdnConfiguration": { "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", "shape": "CdnConfiguration" @@ -471,7 +530,7 @@ "SlateAdUrl": { "documentation": "

The URL for a high-quality video asset to transcode and use to fill in time that's not used by ads. AWS Elemental MediaTailor shows the slate to fill in gaps in media content. Configuring the slate is optional for non-VPAID configurations. For VPAID, the slate is required because MediaTailor provides it in the slots that are designated for dynamic ad content. The slate must be a high-quality asset that contains both audio and video.

", "shape": "__string" - }, + }, "Tags": { "documentation": "

The tags to assign to the playback configuration.

", "locationName": "tags", @@ -493,7 +552,15 @@ "AdDecisionServerUrl": { "documentation": "

The URL for the ad decision server (ADS). This includes the specification of static parameters and placeholders for dynamic parameters. AWS Elemental MediaTailor substitutes player-specific and session-specific parameters as needed when calling the ADS. Alternately, for testing, you can provide a static VAST URL. The maximum length is 25,000 characters.

", "shape": "__string" - }, + }, + "AvailSuppression" : { + "shape" : "AvailSuppression", + "documentation": "

The configuration for Avail Suppression. Ad suppression can be used to turn off ad personalization in a long manifest, or if a viewer joins mid-break.

" + }, + "Bumper": { + "shape": "Bumper", + "documentation": "

The configuration for bumpers. Bumpers are short audio or video clips that play at the start or before the end of an ad break.

" + }, "CdnConfiguration": { "documentation": "

The configuration for using a content delivery network (CDN), like Amazon CloudFront, for content and ad segment management.

", "shape": "CdnConfiguration" @@ -501,7 +568,7 @@ "DashConfiguration": { "documentation": "

The configuration for DASH content.

", "shape": "DashConfiguration" - }, + }, "HlsConfiguration": { "documentation": "

The configuration for HLS content.

", "shape": "HlsConfiguration" diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index a88b87de01a8..48c92aba5c7e 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhub/src/main/resources/codegen-resources/service-2.json b/services/migrationhub/src/main/resources/codegen-resources/service-2.json index b744e2f71506..ba8a920735d8 100644 --- a/services/migrationhub/src/main/resources/codegen-resources/service-2.json +++ b/services/migrationhub/src/main/resources/codegen-resources/service-2.json @@ -22,6 +22,7 @@ "output":{"shape":"AssociateCreatedArtifactResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -42,6 +43,7 @@ "output":{"shape":"AssociateDiscoveredResourceResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -63,6 +65,7 @@ "output":{"shape":"CreateProgressUpdateStreamResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -82,6 +85,7 @@ "output":{"shape":"DeleteProgressUpdateStreamResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -102,6 +106,7 @@ "output":{"shape":"DescribeApplicationStateResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -121,6 +126,7 @@ 
"output":{"shape":"DescribeMigrationTaskResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -139,6 +145,7 @@ "output":{"shape":"DisassociateCreatedArtifactResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -159,6 +166,7 @@ "output":{"shape":"DisassociateDiscoveredResourceResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -179,6 +187,7 @@ "output":{"shape":"ImportMigrationTaskResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -199,6 +208,7 @@ "output":{"shape":"ListApplicationStatesResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -216,6 +226,7 @@ "output":{"shape":"ListCreatedArtifactsResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -234,6 +245,7 @@ "output":{"shape":"ListDiscoveredResourcesResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -252,6 +264,7 @@ "output":{"shape":"ListMigrationTasksResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, 
{"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -271,6 +284,7 @@ "output":{"shape":"ListProgressUpdateStreamsResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, @@ -288,6 +302,7 @@ "output":{"shape":"NotifyApplicationStateResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -309,6 +324,7 @@ "output":{"shape":"NotifyMigrationTaskStateResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -329,6 +345,7 @@ "output":{"shape":"PutResourceAttributesResult"}, "errors":[ {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, @@ -352,7 +369,8 @@ "ApplicationId":{ "type":"string", "max":1600, - "min":1 + "min":1, + "pattern":"^.{1,1600}$" }, "ApplicationIds":{ "type":"list", @@ -456,7 +474,9 @@ }, "ConfigurationId":{ "type":"string", - "min":1 + "max":1600, + "min":1, + "pattern":"^.{1,1600}$" }, "CreateProgressUpdateStreamRequest":{ "type":"structure", @@ -495,7 +515,8 @@ "CreatedArtifactDescription":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"^.{0,500}$" }, "CreatedArtifactList":{ "type":"list", @@ -655,7 +676,8 @@ "DiscoveredResourceDescription":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"^.{0,500}$" }, "DiscoveredResourceList":{ "type":"list", @@ -1156,12 +1178,14 @@ "ResourceAttributeValue":{ "type":"string", "max":256, - "min":1 + "min":1, + "pattern":"^.{1,256}$" }, "ResourceName":{ "type":"string", "max":1600, - "min":1 + "min":1, + "pattern":"^.{1,1600}$" 
}, "ResourceNotFoundException":{ "type":"structure", @@ -1171,6 +1195,7 @@ "documentation":"

Exception raised when the request references a resource (Application Discovery Service configuration, update stream, migration task, etc.) that does not exist in Application Discovery Service (Application Discovery Service) or in Migration Hub's repository.

", "exception":true }, + "RetryAfterSeconds":{"type":"integer"}, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -1192,7 +1217,8 @@ "StatusDetail":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"^.{0,500}$" }, "Task":{ "type":"structure", @@ -1213,7 +1239,28 @@ }, "documentation":"

Task object encapsulating task information.

" }, - "Token":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the exception.

" + }, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The number of seconds the caller should wait before retrying.

" + } + }, + "documentation":"

The request was denied due to request throttling.

", + "exception":true + }, + "Token":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + }, "UnauthorizedOperation":{ "type":"structure", "members":{ diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 6db2fa31100a..286ccb3d7e2e 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json b/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json index 7534a7068276..10e600c096c3 100644 --- a/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json +++ b/services/migrationhubconfig/src/main/resources/codegen-resources/service-2.json @@ -25,6 +25,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"DryRunOperation"}, {"shape":"InvalidInputException"} ], @@ -42,9 +43,10 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidInputException"} ], - "documentation":"

This API permits filtering on the ControlId, HomeRegion, and RegionControlScope fields.

" + "documentation":"

This API permits filtering on the ControlId and HomeRegion fields.

" }, "GetHomeRegion":{ "name":"GetHomeRegion", @@ -58,6 +60,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidInputException"} ], "documentation":"

Returns the calling account’s home region, if configured. This API is used by other AWS services to determine the regional endpoint for calling AWS Application Discovery Service and Migration Hub. You must call GetHomeRegion at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.

" @@ -227,6 +230,7 @@ "exception":true }, "RequestedTime":{"type":"timestamp"}, + "RetryAfterSeconds":{"type":"integer"}, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -261,6 +265,19 @@ "type":"string", "enum":["ACCOUNT"] }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"ErrorMessage"}, + "RetryAfterSeconds":{ + "shape":"RetryAfterSeconds", + "documentation":"

The number of seconds the caller should wait before retrying.

" + } + }, + "documentation":"

The request was denied due to request throttling.

", + "exception":true + }, "Token":{ "type":"string", "max":2048, @@ -268,5 +285,5 @@ "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" } }, - "documentation":"

The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region.

You can use these APIs within your home region only. If you call these APIs from outside your home region, your calls are rejected, except for the ability to register your agents and connectors.

You must call GetHomeRegion at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.

The StartDataCollection API call in AWS Application Discovery Service allows your agents and connectors to begin collecting data that flows directly into the home region, and it will prevent you from enabling data collection information to be sent outside the home region.

For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference.

The Migration Hub Home Region APIs do not support AWS Organizations.

" + "documentation":"

The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region.

  • You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home region, or a HomeRegionNotSetException error is returned.

  • API calls for read actions (list, describe, stop, and delete) are permitted outside of your home region.

  • If you call a write API outside the home region, an InvalidInputException is returned.

  • You can call GetHomeRegion action to obtain the account's Migration Hub home region.

For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference.

" } diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 9b5a3bf49cf9..cd331558081f 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 7c3ad1e6dcc7..08ff935179eb 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index cb0bd2441db7..bfc2b59505bc 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 2e1c5ecf4a8a..eabfb0dcef62 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index c692e7dd3fb2..4a73b736def0 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 5f76ea690107..7e1979559acc 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index a02cd892d414..bed8a2507170 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
0.2.0-PREVIEW + 2.13.56-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/opsworkscm/src/main/resources/codegen-resources/paginators-1.json b/services/opsworkscm/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..95d97466d956 100644 --- a/services/opsworkscm/src/main/resources/codegen-resources/paginators-1.json +++ b/services/opsworkscm/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,28 @@ { "pagination": { + "DescribeBackups": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Backups" + }, + "DescribeEvents": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ServerEvents" + }, + "DescribeServers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Servers" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" + } } -} +} \ No newline at end of file diff --git a/services/opsworkscm/src/main/resources/codegen-resources/service-2.json b/services/opsworkscm/src/main/resources/codegen-resources/service-2.json index 0b2b2cd3a31b..562d9ec655de 100644 --- a/services/opsworkscm/src/main/resources/codegen-resources/service-2.json +++ b/services/opsworkscm/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Associates a new node with the server. For more information about how to disassociate a node, see DisassociateNode.

On a Chef server: This command is an alternative to knife bootstrap.

Example (Chef): aws opsworks-cm associate-node --server-name MyServer --node-name MyManagedNode --engine-attributes \"Name=CHEF_ORGANIZATION,Value=default\" \"Name=CHEF_NODE_PUBLIC_KEY,Value=public-key-pem\"

On a Puppet server, this command is an alternative to the puppet cert sign command that signs a Puppet node CSR.

Example (Chef): aws opsworks-cm associate-node --server-name MyServer --node-name MyManagedNode --engine-attributes \"Name=PUPPET_NODE_CSR,Value=csr-pem\"

A node can can only be associated with servers that are in a HEALTHY state. Otherwise, an InvalidStateException is thrown. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid. The AssociateNode API call can be integrated into Auto Scaling configurations, AWS Cloudformation templates, or the user data of a server's instance.

" + "documentation":"

Associates a new node with the server. For more information about how to disassociate a node, see DisassociateNode.

On a Chef server: This command is an alternative to knife bootstrap.

Example (Chef): aws opsworks-cm associate-node --server-name MyServer --node-name MyManagedNode --engine-attributes \"Name=CHEF_ORGANIZATION,Value=default\" \"Name=CHEF_NODE_PUBLIC_KEY,Value=public-key-pem\"

On a Puppet server, this command is an alternative to the puppet cert sign command that signs a Puppet node CSR.

Example (Puppet): aws opsworks-cm associate-node --server-name MyServer --node-name MyManagedNode --engine-attributes \"Name=PUPPET_NODE_CSR,Value=csr-pem\"

A node can only be associated with servers that are in a HEALTHY state. Otherwise, an InvalidStateException is thrown. A ResourceNotFoundException is thrown when the server does not exist. A ValidationException is raised when parameters of the request are not valid. The AssociateNode API call can be integrated into Auto Scaling configurations, AWS Cloudformation templates, or the user data of a server's instance.

" }, "CreateBackup":{ "name":"CreateBackup", @@ -529,6 +529,7 @@ "CreateServerRequest":{ "type":"structure", "required":[ + "Engine", "ServerName", "InstanceProfileArn", "InstanceType", @@ -1428,5 +1429,5 @@ "exception":true } }, - "documentation":"AWS OpsWorks CM

AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage.

Glossary of terms

  • Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted.

  • Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include ChefAutomate and Puppet.

  • Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts.

  • Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted.

  • Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account.

Endpoints

AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created.

  • opsworks-cm.us-east-1.amazonaws.com

  • opsworks-cm.us-east-2.amazonaws.com

  • opsworks-cm.us-west-1.amazonaws.com

  • opsworks-cm.us-west-2.amazonaws.com

  • opsworks-cm.ap-northeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-2.amazonaws.com

  • opsworks-cm.eu-central-1.amazonaws.com

  • opsworks-cm.eu-west-1.amazonaws.com

Throttling limits

All API operations allow for five requests per second with a burst of 10 requests per second.

" + "documentation":"AWS OpsWorks CM

AWS OpsWorks for configuration management (CM) is a service that runs and manages configuration management servers. You can use AWS OpsWorks CM to create and manage AWS OpsWorks for Chef Automate and AWS OpsWorks for Puppet Enterprise servers, and add or remove nodes for the servers to manage.

Glossary of terms

  • Server: A configuration management server that can be highly-available. The configuration management server runs on an Amazon Elastic Compute Cloud (EC2) instance, and may use various other AWS services, such as Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is a generic abstraction over the configuration manager that you want to use, much like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you create servers, they continue to run until they are deleted.

  • Engine: The engine is the specific configuration manager that you want to use. Valid values in this release include ChefAutomate and Puppet.

  • Backup: This is an application-level backup of the data that the configuration manager stores. AWS OpsWorks CM creates an S3 bucket for backups when you launch the first server. A backup maintains a snapshot of a server's configuration-related attributes at the time the backup starts.

  • Events: Events are always related to a server. Events are written during server creation, when health checks run, when backups are created, when system maintenance is performed, etc. When you delete a server, the server's events are also deleted.

  • Account attributes: Every account has attributes that are assigned in the AWS OpsWorks CM database. These attributes store information about configuration limits (servers, backups, etc.) and your customer account.

Endpoints

AWS OpsWorks CM supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Your servers can only be accessed or managed within the endpoint in which they are created.

  • opsworks-cm.us-east-1.amazonaws.com

  • opsworks-cm.us-east-2.amazonaws.com

  • opsworks-cm.us-west-1.amazonaws.com

  • opsworks-cm.us-west-2.amazonaws.com

  • opsworks-cm.ap-northeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-1.amazonaws.com

  • opsworks-cm.ap-southeast-2.amazonaws.com

  • opsworks-cm.eu-central-1.amazonaws.com

  • opsworks-cm.eu-west-1.amazonaws.com

For more information, see AWS OpsWorks endpoints and quotas in the AWS General Reference.

Throttling limits

All API operations allow for five requests per second with a burst of 10 requests per second.

" } diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index de30c7274c02..6654895efef6 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/service-2.json b/services/organizations/src/main/resources/codegen-resources/service-2.json index db9de25aba44..e25a8c570403 100644 --- a/services/organizations/src/main/resources/codegen-resources/service-2.json +++ b/services/organizations/src/main/resources/codegen-resources/service-2.json @@ -58,7 +58,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy:

  • Service control policy (SCP) - An SCP specifies what permissions can be delegated to users in affected member accounts. The scope of influence for a policy depends on what you attach the policy to:

    • If you attach an SCP to a root, it affects all accounts in the organization.

    • If you attach an SCP to an OU, it affects all accounts in that OU and in any child OUs.

    • If you attach the policy directly to an account, it affects only that account.

    SCPs are JSON policies that specify the maximum permissions for an organization or organizational unit (OU). You can attach one SCP to a higher level root or OU, and a different SCP to a child OU or to an account. The child policy can further restrict only the permissions that pass through the parent filter and are available to the child. An SCP that is attached to a child can't grant a permission that the parent hasn't already granted. For example, imagine that the parent SCP allows permissions A, B, C, D, and E. The child SCP allows C, D, E, F, and G. The result is that the accounts affected by the child SCP are allowed to use only C, D, and E. They can't use A or B because the child OU filtered them out. They also can't use F and G because the parent OU filtered them out. They can't be granted back by the child SCP; child SCPs can only filter the permissions they receive from the parent SCP.

    AWS Organizations attaches a default SCP named \"FullAWSAccess to every root, OU, and account. This default SCP allows all services and actions, enabling any new child OU or account to inherit the permissions of the parent root or OU. If you detach the default policy, you must replace it with a policy that specifies the permissions that you want to allow in that OU or account.

    For more information about how AWS Organizations policies permissions work, see Using Service Control Policies in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" + "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the AWS Organizations User Guide for information about each policy type:

This operation can be called only from the organization's master account.

" }, "CancelHandshake":{ "name":"CancelHandshake", @@ -120,7 +120,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

This action is available if all of the following are true:

  • You're authorized to create accounts in the AWS GovCloud (US) Region. For more information on the AWS GovCloud (US) Region, see the AWS GovCloud User Guide.

  • You already have an account in the AWS GovCloud (US) Region that is associated with your master account in the commercial Region.

  • You call this action from the master account of your organization in the commercial Region.

  • You have the organizations:CreateGovCloudAccount permission. AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, but you should also do the following:

  • Verify that AWS CloudTrail is enabled to store logs.

  • Create an S3 bucket for AWS CloudTrail log storage.

    For more information, see Verifying AWS CloudTrail Is Enabled in the AWS GovCloud User Guide.

You call this action from the master account of your organization in the commercial Region to create a standalone AWS account in the AWS GovCloud (US) Region. After the account is created, the master account of an organization in the AWS GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the AWS GovCloud (US) to join an organization, see AWS Organizations in the AWS GovCloud User Guide.

Calling CreateGovCloudAccount is an asynchronous request that AWS performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the AWS GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

A role is created in the new account in the commercial Region that allows the master account in the organization in the commercial Region to assume it. An AWS GovCloud (US) account is then created and associated with the commercial account that you just created. A role is created in the new AWS GovCloud (US) account that can be assumed by the AWS GovCloud (US) account that is associated with the master account of the commercial organization. For more information and to view a diagram that explains how account access works, see AWS Organizations in the AWS GovCloud User Guide.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the AWS Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" + "documentation":"

This action is available if all of the following are true:

  • You're authorized to create accounts in the AWS GovCloud (US) Region. For more information on the AWS GovCloud (US) Region, see the AWS GovCloud User Guide.

  • You already have an account in the AWS GovCloud (US) Region that is associated with your master account in the commercial Region.

  • You call this action from the master account of your organization in the commercial Region.

  • You have the organizations:CreateGovCloudAccount permission. AWS Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see AWS Organizations and Service-Linked Roles in the AWS Organizations User Guide.

AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, but you should also do the following:

  • Verify that AWS CloudTrail is enabled to store logs.

  • Create an S3 bucket for AWS CloudTrail log storage.

    For more information, see Verifying AWS CloudTrail Is Enabled in the AWS GovCloud User Guide.

You call this action from the master account of your organization in the commercial Region to create a standalone AWS account in the AWS GovCloud (US) Region. After the account is created, the master account of an organization in the AWS GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the AWS GovCloud (US) to join an organization, see AWS Organizations in the AWS GovCloud User Guide.

Calling CreateGovCloudAccount is an asynchronous request that AWS performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the AWS GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

A role is created in the new account in the commercial Region that allows the master account in the organization in the commercial Region to assume it. An AWS GovCloud (US) account is then created and associated with the commercial account that you just created. A role is created in the new AWS GovCloud (US) account that can be assumed by the AWS GovCloud (US) account that is associated with the master account of the commercial organization. For more information and to view a diagram that explains how account access works, see AWS Organizations in the AWS GovCloud User Guide.

For more information about creating accounts, see Creating an AWS Account in Your Organization in the AWS Organizations User Guide.

  • When you create an account in an organization using the AWS Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the AWS Organizations User Guide.

  • If you get an exception that indicates that you exceeded your account limits for the organization, contact AWS Support.

  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact AWS Support.

  • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the AWS Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an AWS Account in the AWS Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

" }, "CreateOrganization":{ "name":"CreateOrganization", @@ -339,7 +339,7 @@ {"shape":"InvalidInputException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Returns the contents of the effective tag policy for the account. The effective tag policy is the aggregation of any tag policies the account inherits, plus any policy directly that is attached to the account.

This action returns information on tag policies only.

For more information on policy inheritance, see How Policy Inheritance Works in the AWS Organizations User Guide.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" + "documentation":"

Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

This operation applies only to policy types other than service control policies (SCPs).

For more information about policy inheritance, see How Policy Inheritance Works in the AWS Organizations User Guide.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" }, "DescribeHandshake":{ "name":"DescribeHandshake", @@ -433,7 +433,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account. If the policy being detached is a service control policy (SCP), the changes to permissions for IAM users and roles in affected accounts are immediate.

Note: Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with one that limits the permissions that can be delegated, you must attach the replacement policy before you can remove the default one. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

This operation can be called only from the organization's master account.

" + "documentation":"

Detaches a policy from a target root, organizational unit (OU), or account.

If the policy being detached is a service control policy (SCP), the changes to permissions for AWS Identity and Access Management (IAM) users and roles in affected accounts are immediate.

Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

This operation can be called only from the organization's master account.

" }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -449,7 +449,8 @@ {"shape":"ConstraintViolationException"}, {"shape":"InvalidInputException"}, {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedAPIEndpointException"} ], "documentation":"

Disables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from AWS Organizations.

We recommend that you disable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts unless the operations are explicitly permitted by the IAM policies that are attached to your roles.

For more information about integrating other services with AWS Organizations, including the list of services that work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account.

" }, @@ -474,7 +475,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Disables an organizational control policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

To view the status of available policy types in the organization, use DescribeOrganization.

" + "documentation":"

Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

This is an asynchronous request that AWS performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. AWS recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

This operation can be called only from the organization's master account.

To view the status of available policy types in the organization, use DescribeOrganization.

" }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -490,7 +491,8 @@ {"shape":"ConstraintViolationException"}, {"shape":"InvalidInputException"}, {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedAPIEndpointException"} ], "documentation":"

Enables the integration of an AWS service (the service that is specified by ServicePrincipal) with AWS Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

We recommend that you enable integration between AWS Organizations and the specified AWS service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other AWS service.

For more information about enabling services to integrate with AWS Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account and only if the organization has enabled all features.

" }, @@ -592,7 +594,8 @@ {"shape":"ConstraintViolationException"}, {"shape":"InvalidInputException"}, {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"UnsupportedAPIEndpointException"} ], "documentation":"

Returns a list of the AWS services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

For more information about integrating other services with AWS Organizations, including the list of services that currently work with Organizations, see Integrating AWS Organizations with Other AWS Services in the AWS Organizations User Guide.

This operation can be called only from the organization's master account or by a member account that is a delegated administrator for an AWS service.

" }, @@ -1111,6 +1114,7 @@ }, "AccountId":{ "type":"string", + "max":12, "pattern":"^\\d{12}$" }, "AccountJoinedMethod":{ @@ -1232,6 +1236,7 @@ }, "ChildId":{ "type":"string", + "max":100, "pattern":"^(\\d{12})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})$" }, "ChildNotFoundException":{ @@ -1267,7 +1272,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit.

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get receive this exception when running a command immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact AWS Support.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You can designate only a member account as a delegated administrator.

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: To complete this operation, you must first deregister this account as a delegated administrator.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: To complete this operation, you must first deregister all delegated administrators for this service.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide contact a valid address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_NUMBER_LIMIT_EXCEEDED. You attempted to exceed the number of policies that you can have in an organization.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master account from the organization. You can't remove the master account. Instead, after you remove all member accounts, delete the organization itself.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first agree to the AWS Customer Agreement. Follow the steps at Removing a member account from your organization in the AWS Organizations User Guide.

  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the AWS Organizations User Guide.

  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact AWS Support to request an increase in your limit.

    Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact AWS Support to request an increase in the number of accounts.

    Deleted and closed accounts still count toward your limit.

    If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact AWS Support.

  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the master account of the organization as a delegated administrator for an AWS service integrated with Organizations. You can designate only a member account as a delegated administrator.

  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an AWS account as a delegated administrator for an AWS service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's master account to the marketplace that corresponds to the master account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions in China. To create an organization, the master account must have a valid business license. For more information, contact customer support.

  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the master account. Then try the operation again.

  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the master account must have an associated account in the AWS GovCloud (US-West) Region. For more information, see AWS Organizations in the AWS GovCloud User Guide.

  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this master account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the AWS Organizations User Guide.

  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1299,7 +1304,8 @@ "MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED", "CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR", "CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG", - "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE" + "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE", + "MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE" ] }, "CreateAccountFailureReason":{ @@ -1341,6 +1347,7 @@ }, "CreateAccountRequestId":{ "type":"string", + "max":36, "pattern":"^car-[a-z0-9]{8,32}$" }, "CreateAccountResponse":{ @@ -1500,7 +1507,7 @@ "members":{ "Content":{ "shape":"PolicyContent", - "documentation":"

The policy content to add to the new policy. For example, if you create a service control policy (SCP), this string must be JSON text that specifies the permissions that admins in attached accounts can delegate to their users, groups, and roles. For more information about the SCP syntax, see Service Control Policy Syntax in the AWS Organizations User Guide.

" + "documentation":"

The policy text content to add to the new policy. The text that you supply must adhere to the rules of the policy type you specify in the Type parameter.

" }, "Description":{ "shape":"PolicyDescription", @@ -1512,7 +1519,7 @@ }, "Type":{ "shape":"PolicyType", - "documentation":"

The type of policy to create.

In the current release, the only type of policy that you can create is a service control policy (SCP).

" + "documentation":"

The type of policy to create. You can specify one of the following values:

" } } }, @@ -1685,11 +1692,11 @@ "members":{ "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The type of policy that you want information about.

" + "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" }, "TargetId":{ "shape":"PolicyTargetId", - "documentation":"

When you're signed in as the master account, specify the ID of the account that you want details about. Specifying an organization root or OU as the target is not supported.

" + "documentation":"

When you're signed in as the master account, specify the ID of the account that you want details about. Specifying an organization root or organizational unit (OU) as the target is not supported.

" } } }, @@ -1726,7 +1733,7 @@ "members":{ "Organization":{ "shape":"Organization", - "documentation":"

A structure that contains information about the organization.

" + "documentation":"

A structure that contains information about the organization.

The AvailablePolicyTypes part of the response is deprecated, and you shouldn't use it in your apps. It doesn't include any policy type supported by Organizations other than SCPs. To determine which policy types are enabled in your organization, use the ListRoots operation.

" } } }, @@ -1816,7 +1823,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to disable in this root.

" + "documentation":"

The policy type that you want to disable in this root. You can specify one of the following values:

" } } }, @@ -1901,7 +1908,11 @@ }, "EffectivePolicyType":{ "type":"string", - "enum":["TAG_POLICY"] + "enum":[ + "TAG_POLICY", + "BACKUP_POLICY", + "AISERVICES_OPT_OUT_POLICY" + ] }, "Email":{ "type":"string", @@ -1947,7 +1958,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to enable.

" + "documentation":"

The policy type that you want to enable. You can specify one of the following values:

" } } }, @@ -2080,6 +2091,7 @@ }, "HandshakeId":{ "type":"string", + "max":34, "pattern":"^h-[0-9a-z]{8,32}$" }, "HandshakeNotFoundException":{ @@ -2093,6 +2105,7 @@ "HandshakeNotes":{ "type":"string", "max":1024, + "pattern":"[\\s\\S]*", "sensitive":true }, "HandshakeParties":{ @@ -2121,6 +2134,7 @@ "type":"string", "max":64, "min":1, + "pattern":"[\\s\\S]*", "sensitive":true }, "HandshakePartyType":{ @@ -2206,7 +2220,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"InvalidInputExceptionReason"} }, - "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation:

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

", + "documentation":"

The requested operation failed because you provided invalid values for one or more of the request parameters. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • IMMUTABLE_POLICY: You specified a policy that is managed by AWS and can't be modified.

  • INPUT_REQUIRED: You must include a value for all required parameters.

  • INVALID_ENUM: You specified an invalid value.

  • INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid characters.

  • INVALID_LIST_MEMBER: You provided a list to a parameter that contains at least one invalid value.

  • INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter from the response to a previous call of the operation.

  • INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, organization, or email) as a party.

  • INVALID_PATTERN: You provided a value that doesn't match the required pattern.

  • INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match the required pattern.

  • INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't begin with the reserved prefix AWSServiceRoleFor.

  • INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name (ARN) for the organization.

  • INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID.

  • INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You can’t add, edit, or delete system tag keys because they're reserved for AWS use. System tags don’t count against your tags per resource limit.

  • MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the operation.

  • MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer than allowed.

  • MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger value than allowed.

  • MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter than allowed.

  • MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value than allowed.

  • MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between entities in the same root.

", "exception":true }, "InvalidInputExceptionReason":{ @@ -2606,7 +2620,7 @@ }, "Filter":{ "shape":"PolicyType", - "documentation":"

The type of policy that you want to include in the returned list.

" + "documentation":"

The type of policy that you want to include in the returned list. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -2637,7 +2651,7 @@ "members":{ "Filter":{ "shape":"PolicyType", - "documentation":"

Specifies the type of policy that you want to include in the response.

" + "documentation":"

Specifies the type of policy that you want to include in the response. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -2790,7 +2804,11 @@ } } }, - "NextToken":{"type":"string"}, + "NextToken":{ + "type":"string", + "max":100000, + "pattern":"[\\s\\S]*" + }, "Organization":{ "type":"structure", "members":{ @@ -2820,7 +2838,7 @@ }, "AvailablePolicyTypes":{ "shape":"PolicyTypes", - "documentation":"

A list of policy types that are enabled for this organization. For example, if your organization has all features enabled, then service control policies (SCPs) are included in the list.

Even if a policy type is shown as available in the organization, you can separately enable and disable them at the root level by using EnablePolicyType and DisablePolicyType. Use ListRoots to see the status of a policy type in that root.

" + "documentation":"

Do not use. This field is deprecated and doesn't provide complete information about the policies in your organization.

To determine the policies that are enabled and available for use in your organization, use the ListRoots operation instead.

" } }, "documentation":"

Contains details about an organization. An organization is a collection of accounts that are centrally managed together using consolidated billing, organized hierarchically with organizational units (OUs), and controlled with policies .

" @@ -2872,12 +2890,14 @@ }, "OrganizationalUnitId":{ "type":"string", + "max":68, "pattern":"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$" }, "OrganizationalUnitName":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"[\\s\\S]*" }, "OrganizationalUnitNotEmptyException":{ "type":"structure", @@ -2915,6 +2935,7 @@ }, "ParentId":{ "type":"string", + "max":100, "pattern":"^(r-[0-9a-z]{4,32})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})$" }, "ParentNotFoundException":{ @@ -2969,14 +2990,17 @@ "PolicyContent":{ "type":"string", "max":1000000, - "min":1 + "min":1, + "pattern":"[\\s\\S]*" }, "PolicyDescription":{ "type":"string", - "max":512 + "max":512, + "pattern":"[\\s\\S]*" }, "PolicyId":{ "type":"string", + "max":130, "pattern":"^p-[0-9a-zA-Z_]{8,128}$" }, "PolicyInUseException":{ @@ -2990,7 +3014,8 @@ "PolicyName":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"[\\s\\S]*" }, "PolicyNotAttachedException":{ "type":"structure", @@ -3040,6 +3065,7 @@ }, "PolicyTargetId":{ "type":"string", + "max":100, "pattern":"^(r-[0-9a-z]{4,32})|(\\d{12})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})$" }, "PolicyTargetSummary":{ @@ -3072,7 +3098,9 @@ "type":"string", "enum":[ "SERVICE_CONTROL_POLICY", - "TAG_POLICY" + "TAG_POLICY", + "BACKUP_POLICY", + "AISERVICES_OPT_OUT_POLICY" ] }, "PolicyTypeAlreadyEnabledException":{ @@ -3088,7 +3116,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Enabling and Disabling a Policy Type on a Root in the AWS Organizations User Guide.

", + "documentation":"

You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Managing AWS Organizations Policies in the AWS Organizations User Guide.

", "exception":true }, "PolicyTypeNotEnabledException":{ @@ -3154,6 +3182,7 @@ }, "RoleName":{ "type":"string", + "max":64, "pattern":"[\\w+=,.@-]{1,64}" }, "Root":{ @@ -3184,6 +3213,7 @@ }, "RootId":{ "type":"string", + "max":34, "pattern":"^r-[0-9a-z]{4,32}$" }, "RootName":{ @@ -3266,7 +3296,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The tag to add to the specified resource. Specifying the tag key is required. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

" + "documentation":"

The tag to add to the specified resource. You must specify both a tag key and value. You can set the value of a tag to an empty string, but you can't set it to null.

" } } }, @@ -3278,6 +3308,7 @@ }, "TaggableResourceId":{ "type":"string", + "max":12, "pattern":"^\\d{12}$" }, "Tags":{ @@ -3312,7 +3343,7 @@ "Type":{"shape":"ExceptionType"}, "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

You have sent too many requests in too short a period of time. The limit helps protect against denial-of-service attacks. Try again later.

For information on limits that affect AWS Organizations, see Limits of AWS Organizations in the AWS Organizations User Guide.

", + "documentation":"

You have sent too many requests in too short a period of time. The quota helps protect against denial-of-service attacks. Try again later.

For information about quotas that affect AWS Organizations, see Quotas for AWS Organizations in the AWS Organizations User Guide.

", "exception":true }, "UnsupportedAPIEndpointException":{ @@ -3320,7 +3351,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

This action isn't available in the current Region.

", + "documentation":"

This action isn't available in the current AWS Region.

", "exception":true }, "UntagResourceRequest":{ diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index ad13027ad1b6..625fa2e4f02a 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 394c8c3bce50..2bf3c37cc01e 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalize/src/main/resources/codegen-resources/service-2.json b/services/personalize/src/main/resources/codegen-resources/service-2.json index f9d3a0bbca16..200ea4278fa5 100644 --- a/services/personalize/src/main/resources/codegen-resources/service-2.json +++ b/services/personalize/src/main/resources/codegen-resources/service-2.json @@ -116,6 +116,22 @@ "documentation":"

Creates an event tracker that you use when sending event data to the specified dataset group using the PutEvents API.

When Amazon Personalize creates an event tracker, it also creates an event-interactions dataset in the dataset group associated with the event tracker. The event-interactions dataset stores the event data from the PutEvents call. The contents of this dataset are not available to the user.

Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker using the same dataset group as an existing event tracker.

When you send event data you include your tracking ID. The tracking ID identifies the customer and authorizes the customer to send the data.

The event tracker can be in one of the following states:

  • CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED

  • DELETE PENDING > DELETE IN_PROGRESS

To get the status of the event tracker, call DescribeEventTracker.

The event tracker must be in the ACTIVE state before using the tracking ID.

Related APIs

", "idempotent":true }, + "CreateFilter":{ + "name":"CreateFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFilterRequest"}, + "output":{"shape":"CreateFilterResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Creates a recommendation filter. For more information, see Using Filters with Amazon Personalize.

" + }, "CreateSchema":{ "name":"CreateSchema", "http":{ @@ -224,6 +240,19 @@ "documentation":"

Deletes the event tracker. Does not delete the event-interactions dataset from the associated dataset group. For more information on event trackers, see CreateEventTracker.

", "idempotent":true }, + "DeleteFilter":{ + "name":"DeleteFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFilterRequest"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes a filter.

" + }, "DeleteSchema":{ "name":"DeleteSchema", "http":{ @@ -374,6 +403,21 @@ "documentation":"

Describes the given feature transformation.

", "idempotent":true }, + "DescribeFilter":{ + "name":"DescribeFilter", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFilterRequest"}, + "output":{"shape":"DescribeFilterResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes a filter's properties.

", + "idempotent":true + }, "DescribeRecipe":{ "name":"DescribeRecipe", "http":{ @@ -538,6 +582,21 @@ "documentation":"

Returns the list of event trackers associated with the account. The response provides the properties for each event tracker, including the Amazon Resource Name (ARN) and tracking ID. For more information on event trackers, see CreateEventTracker.

", "idempotent":true }, + "ListFilters":{ + "name":"ListFilters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFiltersRequest"}, + "output":{"shape":"ListFiltersResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Lists all filters that belong to a given dataset group.

", + "idempotent":true + }, "ListRecipes":{ "name":"ListRecipes", "http":{ @@ -729,6 +788,10 @@ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the batch inference job.

" }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter used on the batch inference job.

" + }, "failureReason":{ "shape":"FailureReason", "documentation":"

If the batch inference job failed, the reason for the failure.

" @@ -1011,6 +1074,10 @@ "shape":"Arn", "documentation":"

The Amazon Resource Name (ARN) of the solution version that will be used to generate the batch inference recommendations.

" }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to apply to the batch inference job. For more information on using filters, see Using Filters with Amazon Personalize.

" + }, "numResults":{ "shape":"NumBatchResults", "documentation":"

The number of recommendations to retrieve.

" @@ -1198,6 +1265,37 @@ } } }, + "CreateFilterRequest":{ + "type":"structure", + "required":[ + "name", + "datasetGroupArn", + "filterExpression" + ], + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter to create.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group that the filter will belong to.

" + }, + "filterExpression":{ + "shape":"FilterExpression", + "documentation":"

The filter expression that designates the interaction types that the filter will filter out. A filter expression must follow the following format:

EXCLUDE itemId WHERE INTERACTIONS.event_type in (\"EVENT_TYPE\")

Where \"EVENT_TYPE\" is the type of event to filter out. To filter out all items with any interactions history, set \"*\" as the EVENT_TYPE. For more information, see Using Filters with Amazon Personalize.

" + } + } + }, + "CreateFilterResponse":{ + "type":"structure", + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the new filter.

" + } + } + }, "CreateSchemaRequest":{ "type":"structure", "required":[ @@ -1714,6 +1812,16 @@ } } }, + "DeleteFilterRequest":{ + "type":"structure", + "required":["filterArn"], + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to delete.

" + } + } + }, "DeleteSchemaRequest":{ "type":"structure", "required":["schemaArn"], @@ -1886,6 +1994,25 @@ } } }, + "DescribeFilterRequest":{ + "type":"structure", + "required":["filterArn"], + "members":{ + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to describe.

" + } + } + }, + "DescribeFilterResponse":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"Filter", + "documentation":"

The filter's details.

" + } + } + }, "DescribeRecipeRequest":{ "type":"structure", "required":["recipeArn"], @@ -2088,6 +2215,89 @@ "value":{"shape":"ParameterValue"}, "max":100 }, + "Filter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was created.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was last updated.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group to which the filter belongs.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If the filter failed, the reason for its failure.

" + }, + "filterExpression":{ + "shape":"FilterExpression", + "documentation":"

Specifies the type of item interactions to filter out of recommendation results. The filter expression must follow the following format:

EXCLUDE itemId WHERE INTERACTIONS.event_type in (\"EVENT_TYPE\")

Where \"EVENT_TYPE\" is the type of event to filter out. For more information, see Using Filters with Amazon Personalize.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the filter.

" + } + }, + "documentation":"

Contains information on a recommendation filter, including its ARN, status, and filter expression.

" + }, + "FilterExpression":{ + "type":"string", + "max":2500, + "min":1, + "sensitive":true + }, + "FilterSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"

The name of the filter.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was created.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The time at which the filter was last updated.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group to which the filter belongs.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If the filter failed, the reason for the failure.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the filter.

" + } + }, + "documentation":"

A short summary of a filter's attributes.

" + }, + "Filters":{ + "type":"list", + "member":{"shape":"FilterSummary"}, + "max":100 + }, "GetSolutionMetricsRequest":{ "type":"structure", "required":["solutionVersionArn"], @@ -2425,6 +2635,36 @@ } } }, + "ListFiltersRequest":{ + "type":"structure", + "members":{ + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The ARN of the dataset group that contains the filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token returned from the previous call to ListFilters for getting the next set of filters (if they exist).

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of filters to return.

" + } + } + }, + "ListFiltersResponse":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"Filters", + "documentation":"

A list of returned filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of filters (if they exist).

" + } + } + }, "ListRecipesRequest":{ "type":"structure", "members":{ diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 47d6f24e2f32..c05eba04f590 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 61d657e6ce96..0f4a90a9f6f5 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json b/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json index bee262699de9..789470f04fce 100644 --- a/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/personalizeruntime/src/main/resources/codegen-resources/service-2.json @@ -88,7 +88,7 @@ }, "context":{ "shape":"Context", - "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type. For more information, see Contextual Metadata.

" + "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type.

" } } }, @@ -123,7 +123,11 @@ }, "context":{ "shape":"Context", - "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type. For more information, see Contextual Metadata.

" + "documentation":"

The contextual metadata to use when getting recommendations. Contextual metadata includes any interaction information that might be relevant when getting a user's recommendations, such as the user's current location or device type.

" + }, + "filterArn":{ + "shape":"Arn", + "documentation":"

The ARN of the filter to apply to the returned recommendations. For more information, see Using Filters with Amazon Personalize.

" } } }, @@ -167,6 +171,10 @@ "itemId":{ "shape":"ItemID", "documentation":"

The recommended item ID.

" + }, + "score":{ + "shape":"Score", + "documentation":"

A numeric representation of the model's certainty that the item will be the next user selection. For more information on scoring logic, see how-scores-work.

" } }, "documentation":"

An object that identifies an item.

The and APIs return a list of PredictedItems.

" @@ -180,6 +188,7 @@ "error":{"httpStatusCode":404}, "exception":true }, + "Score":{"type":"double"}, "UserID":{ "type":"string", "max":256 diff --git a/services/pi/pom.xml b/services/pi/pom.xml index af8148637e1d..49807ec81d77 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index c9dd81ccf878..a8f5cb61e294 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/service-2.json b/services/pinpoint/src/main/resources/codegen-resources/service-2.json index 42f7a0208b2e..018e0faf0e65 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/service-2.json +++ b/services/pinpoint/src/main/resources/codegen-resources/service-2.json @@ -4475,7 +4475,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application. You can also use this operation to define custom attributes (Attributes, Metrics, and UserAttributes properties) for an endpoint.

" + "documentation": "

Creates a new endpoint for an application or updates the settings and attributes of an existing endpoint for an application. You can also use this operation to define custom attributes for an endpoint. If an update includes one or more values for a custom attribute, Amazon Pinpoint replaces (overwrites) any existing values with the new values.

" }, "UpdateEndpointsBatch": { "name": "UpdateEndpointsBatch", @@ -4521,7 +4521,7 @@ "documentation": "

The request failed because too many requests were sent during a certain amount of time (TooManyRequestsException).

" } ], - "documentation": "

Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application. You can also use this operation to define custom attributes (Attributes, Metrics, and UserAttributes properties) for a batch of endpoints.

" + "documentation": "

Creates a new batch of endpoints for an application or updates the settings and attributes of a batch of existing endpoints for an application. You can also use this operation to define custom attributes for a batch of endpoints. If an update includes one or more values for a custom attribute, Amazon Pinpoint replaces (overwrites) any existing values with the new values.

" }, "UpdateGcmChannel": { "name": "UpdateGcmChannel", @@ -5692,6 +5692,10 @@ "Activity": { "type": "structure", "members": { + "CUSTOM": { + "shape": "CustomMessageActivity", + "documentation": "

The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.

" + }, "ConditionalSplit": { "shape": "ConditionalSplitActivity", "documentation": "

The settings for a yes/no split activity. This type of activity sends participants down one of two paths in a journey, based on conditions that you specify.

" @@ -5712,10 +5716,18 @@ "shape": "MultiConditionalSplitActivity", "documentation": "

The settings for a multivariate split activity. This type of activity sends participants down one of as many as five paths (including a default Else path) in a journey, based on conditions that you specify.

" }, + "PUSH": { + "shape": "PushMessageActivity", + "documentation": "

The settings for a push notification activity. This type of activity sends a push notification to participants.

" + }, "RandomSplit": { "shape": "RandomSplitActivity", "documentation": "

The settings for a random split activity. This type of activity randomly sends specified percentages of participants down one of as many as five paths in a journey, based on conditions that you specify.

" }, + "SMS": { + "shape": "SMSMessageActivity", + "documentation": "

The settings for an SMS activity. This type of activity sends a text message to participants.

" + }, "Wait": { "shape": "WaitActivity", "documentation": "

The settings for a wait activity. This type of activity waits for a certain amount of time or until a specific date and time before moving participants to the next activity in a journey.

" @@ -5871,7 +5883,7 @@ }, "KpiName": { "shape": "__string", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" }, "KpiResult": { "shape": "BaseKpiResult", @@ -5932,7 +5944,7 @@ }, "CampaignHook": { "shape": "CampaignHook", - "documentation": "

The settings for the AWS Lambda function to use by default as a code hook for campaigns in the application.

" + "documentation": "

The settings for the AWS Lambda function to invoke by default as a code hook for campaigns in the application. You can use this hook to customize segments that are used by campaigns in the application.

" }, "LastModifiedDate": { "shape": "__string", @@ -5940,7 +5952,7 @@ }, "Limits": { "shape": "CampaignLimits", - "documentation": "

The default sending limits for campaigns in the application.

" + "documentation": "

The default sending limits for campaigns and journeys in the application.

" }, "QuietTime": { "shape": "QuietTime", @@ -6181,6 +6193,16 @@ "Rows" ] }, + "CampaignCustomMessage": { + "type": "structure", + "members": { + "Data": { + "shape": "__string", + "documentation": "

The raw, JSON-formatted string to use as the payload for the message. The maximum size is 5 KB.

" + } + }, + "documentation": "

Specifies the contents of a message that's sent through a custom channel to recipients of a campaign.

" + }, "CampaignDateRangeKpiResponse": { "type": "structure", "members": { @@ -6198,7 +6220,7 @@ }, "KpiName": { "shape": "__string", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" }, "KpiResult": { "shape": "BaseKpiResult", @@ -6268,25 +6290,25 @@ "members": { "LambdaFunctionName": { "shape": "__string", - "documentation": "

The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to send messages for a campaign.

" + "documentation": "

The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to customize a segment for a campaign.

" }, "Mode": { "shape": "Mode", - "documentation": "

Specifies which Lambda mode to use when invoking the AWS Lambda function.

" + "documentation": "

The mode that Amazon Pinpoint uses to invoke the AWS Lambda function. Possible values are:

  • FILTER - Invoke the function to customize the segment that's used by a campaign.

  • DELIVERY - (Deprecated) Previously, invoked the function to send a campaign through a custom channel. This functionality is not supported anymore. To send a campaign through a custom channel, use the CustomDeliveryConfiguration and CampaignCustomMessage objects of the campaign.

" }, "WebUrl": { "shape": "__string", "documentation": "

The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function over HTTPS.

" } }, - "documentation": "

Specifies the AWS Lambda function to use as a code hook for a campaign.

" + "documentation": "

Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign.

" }, "CampaignLimits": { "type": "structure", "members": { "Daily": { "shape": "__integer", - "documentation": "

The maximum number of messages that a campaign can send to a single endpoint during a 24-hour period. The maximum value is 100.

" + "documentation": "

The maximum number of messages that a campaign can send to a single endpoint during a 24-hour period. For an application, this value specifies the default limit for the number of messages that campaigns and journeys can send to a single endpoint during a 24-hour period. The maximum value is 100.

" }, "MaximumDuration": { "shape": "__integer", @@ -6294,14 +6316,14 @@ }, "MessagesPerSecond": { "shape": "__integer", - "documentation": "

The maximum number of messages that a campaign can send each second. The minimum value is 50. The maximum value is 20,000.

" + "documentation": "

The maximum number of messages that a campaign can send each second. For an application, this value specifies the default limit for the number of messages that campaigns and journeys can send each second. The minimum value is 50. The maximum value is 20,000.

" }, "Total": { "shape": "__integer", - "documentation": "

The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. The maximum value is 100.

" + "documentation": "

The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. If a campaign recurs, this setting applies to all runs of the campaign. The maximum value is 100.

" } }, - "documentation": "

Specifies limits on the messages that a campaign can send.

" + "documentation": "

For a campaign, specifies limits on the messages that the campaign can send. For an application, specifies the default limits for messages that campaigns and journeys in the application can send.

" }, "CampaignResponse": { "type": "structure", @@ -6322,9 +6344,13 @@ "shape": "__string", "documentation": "

The date, in ISO 8601 format, when the campaign was created.

" }, + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration", + "documentation": "

The delivery configuration settings for sending the campaign through a custom channel.

" + }, "DefaultState": { "shape": "CampaignState", - "documentation": "

The current status of the campaign's default treatment. This value exists only for campaigns that have more than one treatment, to support A/B testing.

" + "documentation": "

The current status of the campaign's default treatment. This value exists only for campaigns that have more than one treatment.

" }, "Description": { "shape": "__string", @@ -6336,7 +6362,7 @@ }, "Hook": { "shape": "CampaignHook", - "documentation": "

The settings for the AWS Lambda function to use as a code hook for the campaign.

" + "documentation": "

The settings for the AWS Lambda function to use as a code hook for the campaign. You can use this hook to customize the segment that's used by the campaign.

" }, "Id": { "shape": "__string", @@ -6389,11 +6415,11 @@ }, "TreatmentDescription": { "shape": "__string", - "documentation": "

The custom description of a variation of the campaign that's used for A/B testing.

" + "documentation": "

The custom description of the default treatment for the campaign.

" }, "TreatmentName": { "shape": "__string", - "documentation": "

The custom name of a variation of the campaign that's used for A/B testing.

" + "documentation": "

The custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.

" }, "Version": { "shape": "__integer", @@ -6420,7 +6446,7 @@ }, "MessageType": { "shape": "MessageType", - "documentation": "

The type of SMS message. Valid values are: TRANSACTIONAL, the message is critical or time-sensitive, such as a one-time password that supports a customer transaction; and, PROMOTIONAL, the message isn't critical or time-sensitive, such as a marketing message.

" + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, "SenderId": { "shape": "__string", @@ -6434,7 +6460,7 @@ "members": { "CampaignStatus": { "shape": "CampaignStatus", - "documentation": "

The current status of the campaign, or the current status of a treatment that belongs to an A/B test campaign. If a campaign uses A/B testing, the campaign has a status of COMPLETED only if all campaign treatments have a status of COMPLETED.

" + "documentation": "

The current status of the campaign, or the current status of a treatment that belongs to an A/B test campaign.

If a campaign uses A/B testing, the campaign has a status of COMPLETED only if all campaign treatments have a status of COMPLETED. If you delete the segment that's associated with a campaign, the campaign fails and has a status of DELETED.

" } }, "documentation": "

Provides information about the status of a campaign.

" @@ -6512,6 +6538,7 @@ "ChannelType": { "type": "string", "enum": [ + "PUSH", "GCM", "APNS", "APNS_SANDBOX", @@ -6807,11 +6834,11 @@ "members": { "Attributes": { "shape": "MapOf__string", - "documentation": "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

  • An attribute name must start with a letter or number and it can contain up to 50 characters. The characters can be letters, numbers, underscores (_), or hyphens (-). Attribute names are case sensitive and must be unique.

  • An attribute display name must start with a letter or number and it can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

This object is required if the configuration invokes an AWS Lambda function (LambdaFunctionArn) to process recommendation data. Otherwise, don't include this object in your request.

" + "documentation": "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

  • An attribute name must start with a letter or number and it can contain up to 50 characters. The characters can be letters, numbers, underscores (_), or hyphens (-). Attribute names are case sensitive and must be unique.

  • An attribute display name must start with a letter or number and it can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

This object is required if the configuration invokes an AWS Lambda function (RecommendationTransformerUri) to process recommendation data. Otherwise, don't include this object in your request.

" }, "Description": { "shape": "__string", - "documentation": "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters.

" + "documentation": "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters. The characters can be letters, numbers, spaces, or the following symbols: _ ; () , -.

" }, "Name": { "shape": "__string", @@ -6819,7 +6846,7 @@ }, "RecommendationProviderIdType": { "shape": "__string", - "documentation": "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

  • PINPOINT_ENDPOINT_ID - Associate each user in the model with a particular endpoint in Amazon Pinpoint. The data is correlated based on endpoint IDs in Amazon Pinpoint. This is the default value.

  • PINPOINT_USER_ID - Associate each user in the model with a particular user and endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint ID. Otherwise, messages won’t be sent to the user's endpoint.

" + "documentation": "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

  • PINPOINT_ENDPOINT_ID - Associate each user in the model with a particular endpoint in Amazon Pinpoint. The data is correlated based on endpoint IDs in Amazon Pinpoint. This is the default value.

  • PINPOINT_USER_ID - Associate each user in the model with a particular user and endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint ID. Otherwise, messages won’t be sent to the user's endpoint.

" }, "RecommendationProviderRoleArn": { "shape": "__string", @@ -6835,11 +6862,11 @@ }, "RecommendationsDisplayName": { "shape": "__string", - "documentation": "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

" + "documentation": "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

" }, "RecommendationsPerMessage": { "shape": "__integer", - "documentation": "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

" + "documentation": "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

" } }, "documentation": "

Specifies Amazon Pinpoint configuration settings for retrieving and processing recommendation data from a recommender model.

", @@ -6983,6 +7010,53 @@ ], "payload": "CreateTemplateMessageBody" }, + "CustomDeliveryConfiguration": { + "type": "structure", + "members": { + "DeliveryUri": { + "shape": "__string", + "documentation": "

The destination to send the campaign or treatment to. This value can be one of the following:

  • The name or Amazon Resource Name (ARN) of an AWS Lambda function to invoke to handle delivery of the campaign or treatment.

  • The URL for a web application or service that supports HTTPS and can receive the message. The URL has to be a full URL, including the HTTPS protocol.

" + }, + "EndpointTypes": { + "shape": "ListOf__EndpointTypesElement", + "documentation": "

The types of endpoints to send the campaign or treatment to. Each valid value maps to a type of channel that you can associate with an endpoint by using the ChannelType property of an endpoint.

" + } + }, + "documentation": "

Specifies the delivery configuration settings for sending a campaign or campaign treatment through a custom channel. This object is required if you use the CampaignCustomMessage object to define the message to send for the campaign or campaign treatment.

", + "required": [ + "DeliveryUri" + ] + }, + "CustomMessageActivity": { + "type": "structure", + "members": { + "DeliveryUri": { + "shape": "__string", + "documentation": "

The destination to send the custom message to. This value can be one of the following:

  • The name or Amazon Resource Name (ARN) of an AWS Lambda function to invoke to handle delivery of the custom message.

  • The URL for a web application or service that supports HTTPS and can receive the message. The URL has to be a full URL, including the HTTPS protocol.

" + }, + "EndpointTypes": { + "shape": "ListOf__EndpointTypesElement", + "documentation": "

The types of endpoints to send the custom message to. Each valid value maps to a type of channel that you can associate with an endpoint by using the ChannelType property of an endpoint.

" + }, + "MessageConfig": { + "shape": "JourneyCustomMessage", + "documentation": "

Specifies the message data included in a custom channel message that's sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after Amazon Pinpoint calls the AWS Lambda function or web hook.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the custom message template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the message template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.

" + }, "DefaultMessage": { "type": "structure", "members": { @@ -7763,7 +7837,7 @@ "members": { "ConfigurationSet": { "shape": "__string", - "documentation": "

The configuration set that you want to apply to email that you send through the channel by using the Amazon Pinpoint Email API.

" + "documentation": "

The Amazon SES configuration set that you want to apply to messages that you send through the channel.

" }, "Enabled": { "shape": "__boolean", @@ -7797,7 +7871,7 @@ }, "ConfigurationSet": { "shape": "__string", - "documentation": "

The configuration set that's applied to email that's sent through the channel by using the Amazon Pinpoint Email API.

" + "documentation": "

The Amazon SES configuration set that's applied to messages that are sent through the channel.

" }, "CreationDate": { "shape": "__string", @@ -7809,7 +7883,7 @@ }, "FromAddress": { "shape": "__string", - "documentation": "

The verified email address that you send email from when you send email through the channel.

" + "documentation": "

The verified email address that email is sent from when you send email through the channel.

" }, "HasCredential": { "shape": "__boolean", @@ -7821,7 +7895,7 @@ }, "Identity": { "shape": "__string", - "documentation": "

The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that you use when you send email through the channel.

" + "documentation": "

The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that's used when you send email through the channel.

" }, "IsArchived": { "shape": "__boolean", @@ -7837,7 +7911,7 @@ }, "MessagesPerSecond": { "shape": "__integer", - "documentation": "

The maximum number of emails that you can send through the channel each second.

" + "documentation": "

The maximum number of emails that can be sent through the channel each second.

" }, "Platform": { "shape": "__string", @@ -7896,7 +7970,7 @@ "members": { "MessageConfig": { "shape": "JourneyEmailMessage", - "documentation": "

The \"From\" address to use for the message.

" + "documentation": "

Specifies the sender address for an email message that's sent to participants in the journey.

" }, "NextActivity": { "shape": "__string", @@ -7904,7 +7978,7 @@ }, "TemplateName": { "shape": "__string", - "documentation": "

The name of the email template to use for the message.

" + "documentation": "

The name of the email message template to use for the message. If specified, this value must match the name of an existing message template.

" }, "TemplateVersion": { "shape": "__string", @@ -8062,7 +8136,7 @@ }, "User": { "shape": "EndpointUser", - "documentation": "

One or more custom user attributes that describe the user who's associated with the endpoint.

" + "documentation": "

One or more custom attributes that describe the user who's associated with the endpoint.

" } }, "documentation": "

Specifies an endpoint to create or update and the settings and attributes to set or change for the endpoint.

" @@ -8241,7 +8315,7 @@ }, "User": { "shape": "EndpointUser", - "documentation": "

One or more custom user attributes that describe the user who's associated with the endpoint.

" + "documentation": "

One or more custom attributes that describe the user who's associated with the endpoint.

" } }, "documentation": "

Specifies the channel type and other settings for an endpoint.

" @@ -8427,10 +8501,7 @@ "documentation": "

The message identifier (message_id) for the message to use when determining whether message events meet the condition.

" } }, - "documentation": "

Specifies the conditions to evaluate for an event that applies to an activity in a journey.

", - "required": [ - "Dimensions" - ] + "documentation": "

Specifies the conditions to evaluate for an event that applies to an activity in a journey.

" }, "EventDimensions": { "type": "structure", @@ -9073,19 +9144,19 @@ "shape": "__string", "location": "uri", "locationName": "kpi-name", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" }, "NextToken": { "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "StartTime": { "shape": "__timestampIso8601", @@ -9144,7 +9215,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9211,7 +9282,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9262,19 +9333,19 @@ "shape": "__string", "location": "uri", "locationName": "kpi-name", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" }, "NextToken": { "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "StartTime": { "shape": "__timestampIso8601", @@ -9393,7 +9464,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9432,7 +9503,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9646,7 +9717,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9743,7 +9814,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -9793,19 +9864,19 @@ "shape": "__string", "location": "uri", "locationName": "kpi-name", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), to retrieve data for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. Examples are email-open-rate and successful-delivery-rate. For a list of valid values, see the Amazon Pinpoint Developer Guide.

" }, "NextToken": { "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "StartTime": { "shape": "__timestampIso8601", @@ -9857,13 +9928,13 @@ "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" } }, "required": [ @@ -9903,13 +9974,13 @@ "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" } }, "required": [ @@ -10027,7 +10098,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -10062,7 +10133,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "SegmentId": { "shape": "__string", @@ -10107,7 +10178,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "SegmentId": { "shape": "__string", @@ -10225,7 +10296,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "SegmentId": { "shape": "__string", @@ -10270,7 +10341,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -10684,6 +10755,16 @@ "FAILED" ] }, + "JourneyCustomMessage": { + "type": "structure", + "members": { + "Data": { + "shape": "__string", + "documentation": "

The message content that's passed to an AWS Lambda function or to a web hook.

" + } + }, + "documentation": "

Specifies the message content for a custom channel message that's sent to participants in a journey.

" + }, "JourneyDateRangeKpiResponse": { "type": "structure", "members": { @@ -10701,7 +10782,7 @@ }, "KpiName": { "shape": "__string", - "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

" }, "KpiResult": { "shape": "BaseKpiResult", @@ -10761,7 +10842,7 @@ }, "Metrics": { "shape": "MapOf__string", - "documentation": "

A JSON object that contains the results of the query. The results vary depending on the type of activity (ActivityType). For information about the structure and contents of the results, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

A JSON object that contains the results of the query. The results vary depending on the type of activity (ActivityType). For information about the structure and contents of the results, see the Amazon Pinpoint Developer Guide.

" } }, "documentation": "

Provides the results of a query that retrieved the data for a standard execution metric that applies to a journey activity, and provides information about that query.

", @@ -10791,7 +10872,7 @@ }, "Metrics": { "shape": "MapOf__string", - "documentation": "

A JSON object that contains the results of the query. For information about the structure and contents of the results, see the Amazon Pinpoint Developer Guide.

" + "documentation": "

A JSON object that contains the results of the query. For information about the structure and contents of the results, see the Amazon Pinpoint Developer Guide.

" } }, "documentation": "

Provides the results of a query that retrieved the data for a standard execution metric that applies to a journey, and provides information about that query.

", @@ -10820,6 +10901,16 @@ }, "documentation": "

Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.

" }, + "JourneyPushMessage": { + "type": "structure", + "members": { + "TimeToLive": { + "shape": "__string", + "documentation": "

The number of seconds that the push notification service should keep the message, if the service is unable to deliver the notification the first time. This value is converted to an expiration value when it's sent to a push-notification service. If this value is 0, the service treats the notification as if it expires immediately and the service doesn't store or try to deliver the notification again.

This value doesn't apply to messages that are sent through the Amazon Device Messaging (ADM) service.

" + } + }, + "documentation": "

Specifies the message configuration for a push notification that's sent to participants in a journey.

" + }, "JourneyResponse": { "type": "structure", "members": { @@ -10892,6 +10983,20 @@ "ApplicationId" ] }, + "JourneySMSMessage": { + "type": "structure", + "members": { + "MessageType": { + "shape": "MessageType", + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" + }, + "SenderId": { + "shape": "__string", + "documentation": "

The sender ID to display as the sender of the message on a recipient's device. Support for sender IDs varies by country or region. For more information, see Supported Countries and Regions in the Amazon Pinpoint User Guide.

" + } + }, + "documentation": "

Specifies the sender ID and message type for an SMS message that's sent to participants in a journey.

" + }, "JourneySchedule": { "type": "structure", "members": { @@ -10950,7 +11055,7 @@ "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Token": { "shape": "__string", @@ -11025,13 +11130,13 @@ "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "TemplateName": { "shape": "__string", @@ -11070,13 +11175,13 @@ "shape": "__string", "location": "querystring", "locationName": "next-token", - "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The string that specifies which page of results to return in a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "PageSize": { "shape": "__string", "location": "querystring", "locationName": "page-size", - "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is currently not supported for application, campaign, and journey metrics.

" + "documentation": "

The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.

" }, "Prefix": { "shape": "__string", @@ -11177,15 +11282,19 @@ "members": { "ADMMessage": { "shape": "Message", - "documentation": "

The message that the campaign sends through the ADM (Amazon Device Messaging) channel. This message overrides the default message.

" + "documentation": "

The message that the campaign sends through the ADM (Amazon Device Messaging) channel. If specified, this message overrides the default message.

" }, "APNSMessage": { "shape": "Message", - "documentation": "

The message that the campaign sends through the APNs (Apple Push Notification service) channel. This message overrides the default message.

" + "documentation": "

The message that the campaign sends through the APNs (Apple Push Notification service) channel. If specified, this message overrides the default message.

" }, "BaiduMessage": { "shape": "Message", - "documentation": "

The message that the campaign sends through the Baidu (Baidu Cloud Push) channel. This message overrides the default message.

" + "documentation": "

The message that the campaign sends through the Baidu (Baidu Cloud Push) channel. If specified, this message overrides the default message.

" + }, + "CustomMessage": { + "shape": "CampaignCustomMessage", + "documentation": "

The message that the campaign sends through a custom channel, as specified by the delivery configuration (CustomDeliveryConfiguration) settings for the campaign. If specified, this message overrides the default message.

" }, "DefaultMessage": { "shape": "Message", @@ -11193,15 +11302,15 @@ }, "EmailMessage": { "shape": "CampaignEmailMessage", - "documentation": "

The message that the campaign sends through the email channel.

" + "documentation": "

The message that the campaign sends through the email channel. If specified, this message overrides the default message.

" }, "GCMMessage": { "shape": "Message", - "documentation": "

The message that the campaign sends through the GCM channel, which enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. This message overrides the default message.

" + "documentation": "

The message that the campaign sends through the GCM channel, which enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. If specified, this message overrides the default message.

" }, "SMSMessage": { "shape": "CampaignSmsMessage", - "documentation": "

The message that the campaign sends through the SMS channel.

" + "documentation": "

The message that the campaign sends through the SMS channel. If specified, this message overrides the default message.

" } }, "documentation": "

Specifies the message configuration settings for a campaign.

" @@ -11569,6 +11678,28 @@ }, "documentation": "

Specifies the properties and attributes of an endpoint that's associated with an event.

" }, + "PushMessageActivity": { + "type": "structure", + "members": { + "MessageConfig": { + "shape": "JourneyPushMessage", + "documentation": "

Specifies the time to live (TTL) value for push notifications that are sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after the message is sent.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the push notification template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the push notification template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

Specifies the settings for a push notification activity in a journey. This type of activity sends a push notification to participants.

" + }, "PushNotificationTemplateRequest": { "type": "structure", "members": { @@ -11828,7 +11959,7 @@ "members": { "Attributes": { "shape": "MapOf__string", - "documentation": "

A map that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

This value is null if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

" + "documentation": "

A map that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

This value is null if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

" }, "CreationDate": { "shape": "__string", @@ -11868,11 +11999,11 @@ }, "RecommendationsDisplayName": { "shape": "__string", - "documentation": "

The custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console.

This value is null if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

" + "documentation": "

The custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console.

This value is null if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

" }, "RecommendationsPerMessage": { "shape": "__integer", - "documentation": "

The number of recommended items that are retrieved from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates.

" + "documentation": "

The number of recommended items that are retrieved from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables.

" } }, "documentation": "

Provides information about Amazon Pinpoint configuration settings for retrieving and processing data from a recommender model.

", @@ -12063,7 +12194,7 @@ }, "MessageType": { "shape": "MessageType", - "documentation": "

The SMS message type. Valid values are: TRANSACTIONAL, the message is critical or time-sensitive, such as a one-time password that supports a customer transaction; and, PROMOTIONAL, the message is not critical or time-sensitive, such as a marketing message.

" + "documentation": "

The SMS message type. Valid values are TRANSACTIONAL (for messages that are critical or time-sensitive, such as one-time passwords) and PROMOTIONAL (for messages that aren't critical or time-sensitive, such as marketing messages).

" }, "OriginationNumber": { "shape": "__string", @@ -12080,6 +12211,28 @@ }, "documentation": "

Specifies the default settings for a one-time SMS message that's sent directly to an endpoint.

" }, + "SMSMessageActivity": { + "type": "structure", + "members": { + "MessageConfig": { + "shape": "JourneySMSMessage", + "documentation": "

Specifies the sender ID and message type for an SMS message that's sent to participants in a journey.

" + }, + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform, after the message is sent.

" + }, + "TemplateName": { + "shape": "__string", + "documentation": "

The name of the SMS message template to use for the message. If specified, this value must match the name of an existing message template.

" + }, + "TemplateVersion": { + "shape": "__string", + "documentation": "

The unique identifier for the version of the SMS template to use for the message. If specified, this value must match the identifier for an existing template version. To retrieve a list of versions and version identifiers for a template, use the Template Versions resource.

If you don't specify a value for this property, Amazon Pinpoint uses the active version of the template. The active version is typically the version of a template that's been most recently reviewed and approved for use, depending on your workflow. It isn't necessarily the latest version of a template.

" + } + }, + "documentation": "

Specifies the settings for an SMS activity in a journey. This type of activity sends a text message to participants.

" + }, "SMSTemplateRequest": { "type": "structure", "members": { @@ -12953,6 +13106,10 @@ "TreatmentResource": { "type": "structure", "members": { + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration", + "documentation": "

The delivery configuration settings for sending the treatment through a custom channel. This object is required if the MessageConfiguration object for the treatment specifies a CustomMessage object.

" + }, "Id": { "shape": "__string", "documentation": "

The unique identifier for the treatment.

" @@ -12983,10 +13140,10 @@ }, "TreatmentName": { "shape": "__string", - "documentation": "

The custom name of the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

" + "documentation": "

The custom name of the treatment.

" } }, - "documentation": "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", + "documentation": "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", "required": [ "Id", "SizePercent" @@ -13584,11 +13741,11 @@ "members": { "Attributes": { "shape": "MapOf__string", - "documentation": "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommenderUserIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

  • An attribute name must start with a letter or number and it can contain up to 50 characters. The characters can be letters, numbers, underscores (_), or hyphens (-). Attribute names are case sensitive and must be unique.

  • An attribute display name must start with a letter or number and it can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

This object is required if the configuration invokes an AWS Lambda function (LambdaFunctionArn) to process recommendation data. Otherwise, don't include this object in your request.

" + "documentation": "

A map of key-value pairs that defines 1-10 custom endpoint or user attributes, depending on the value for the RecommendationProviderIdType property. Each of these attributes temporarily stores a recommended item that's retrieved from the recommender model and sent to an AWS Lambda function for additional processing. Each attribute can be used as a message variable in a message template.

In the map, the key is the name of a custom attribute and the value is a custom display name for that attribute. The display name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The following restrictions apply to these names:

  • An attribute name must start with a letter or number and it can contain up to 50 characters. The characters can be letters, numbers, underscores (_), or hyphens (-). Attribute names are case sensitive and must be unique.

  • An attribute display name must start with a letter or number and it can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-).

This object is required if the configuration invokes an AWS Lambda function (RecommendationTransformerUri) to process recommendation data. Otherwise, don't include this object in your request.

" }, "Description": { "shape": "__string", - "documentation": "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters.

" + "documentation": "

A custom description of the configuration for the recommender model. The description can contain up to 128 characters. The characters can be letters, numbers, spaces, or the following symbols: _ ; () , ‐.

" }, "Name": { "shape": "__string", @@ -13596,7 +13753,7 @@ }, "RecommendationProviderIdType": { "shape": "__string", - "documentation": "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

  • PINPOINT_ENDPOINT_ID - Associate each user in the model with a particular endpoint in Amazon Pinpoint. The data is correlated based on endpoint IDs in Amazon Pinpoint. This is the default value.

  • PINPOINT_USER_ID - Associate each user in the model with a particular user and endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint ID. Otherwise, messages won’t be sent to the user's endpoint.

" + "documentation": "

The type of Amazon Pinpoint ID to associate with unique user IDs in the recommender model. This value enables the model to use attribute and event data that’s specific to a particular endpoint or user in an Amazon Pinpoint application. Valid values are:

  • PINPOINT_ENDPOINT_ID - Associate each user in the model with a particular endpoint in Amazon Pinpoint. The data is correlated based on endpoint IDs in Amazon Pinpoint. This is the default value.

  • PINPOINT_USER_ID - Associate each user in the model with a particular user and endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint ID. Otherwise, messages won’t be sent to the user's endpoint.

" }, "RecommendationProviderRoleArn": { "shape": "__string", @@ -13612,11 +13769,11 @@ }, "RecommendationsDisplayName": { "shape": "__string", - "documentation": "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores a recommended item for each endpoint or user, depending on the value for the RecommenderUserIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

This name appears in the Attribute finder pane of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

" + "documentation": "

A custom display name for the standard endpoint or user attribute (RecommendationItems) that temporarily stores recommended items for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This value is required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

This name appears in the Attribute finder of the template editor on the Amazon Pinpoint console. The name can contain up to 25 characters. The characters can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions don't apply to attribute values.

" }, "RecommendationsPerMessage": { "shape": "__integer", - "documentation": "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommenderUserIdType property. This number determines how many recommended attributes are available for use as message variables in message templates. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional processing of recommendation data.

" + "documentation": "

The number of recommended items to retrieve from the model for each endpoint or user, depending on the value for the RecommendationProviderIdType property. This number determines how many recommended items are available for use in message variables. The minimum value is 1. The maximum value is 5. The default value is 5.

To use multiple recommended items and custom attributes with message variables, you have to use an AWS Lambda function (RecommendationTransformerUri) to perform additional processing of recommendation data.

" } }, "documentation": "

Specifies Amazon Pinpoint configuration settings for retrieving and processing recommendation data from a recommender model.

", @@ -14090,7 +14247,7 @@ "members": { "CampaignHook": { "shape": "CampaignHook", - "documentation": "

The settings for the AWS Lambda function to use by default as a code hook for campaigns in the application. To override these settings for a specific campaign, use the Campaign resource to define custom Lambda function settings for the campaign.

" + "documentation": "

The settings for the AWS Lambda function to invoke by default as a code hook for campaigns in the application. You can use this hook to customize segments that are used by campaigns in the application.

To override these settings and define custom settings for a specific campaign, use the CampaignHook object of the Campaign resource.

" }, "CloudWatchMetricsEnabled": { "shape": "__boolean", @@ -14098,7 +14255,7 @@ }, "Limits": { "shape": "CampaignLimits", - "documentation": "

The default sending limits for campaigns in the application. To override these limits for a specific campaign, use the Campaign resource to define custom limits for the campaign.

" + "documentation": "

The default sending limits for campaigns and journeys in the application. To override these limits and define custom limits for a specific campaign or journey, use the Campaign resource or the Journey resource, respectively.

" }, "QuietTime": { "shape": "QuietTime", @@ -14114,6 +14271,10 @@ "shape": "ListOfWriteTreatmentResource", "documentation": "

An array of requests that defines additional treatments for the campaign, in addition to the default treatment for the campaign.

" }, + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration", + "documentation": "

The delivery configuration settings for sending the campaign through a custom channel. This object is required if the MessageConfiguration object for the campaign specifies a CustomMessage object.

" + }, "Description": { "shape": "__string", "documentation": "

A custom description of the campaign.

" @@ -14124,11 +14285,11 @@ }, "Hook": { "shape": "CampaignHook", - "documentation": "

The settings for the AWS Lambda function to use as a code hook for the campaign.

" + "documentation": "

The settings for the AWS Lambda function to invoke as a code hook for the campaign. You can use this hook to customize the segment that's used by the campaign.

" }, "IsPaused": { "shape": "__boolean", - "documentation": "

Specifies whether to pause the campaign. A paused campaign doesn't run unless you resume it by setting this value to false.

" + "documentation": "

Specifies whether to pause the campaign. A paused campaign doesn't run unless you resume it by changing this value to false.

" }, "Limits": { "shape": "CampaignLimits", @@ -14165,11 +14326,11 @@ }, "TreatmentDescription": { "shape": "__string", - "documentation": "

A custom description of a variation of the campaign to use for A/B testing.

" + "documentation": "

A custom description of the default treatment for the campaign.

" }, "TreatmentName": { "shape": "__string", - "documentation": "

A custom name for a variation of the campaign to use for A/B testing.

" + "documentation": "

A custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.

" } }, "documentation": "

Specifies the configuration and other settings for a campaign.

" @@ -14275,6 +14436,10 @@ "WriteTreatmentResource": { "type": "structure", "members": { + "CustomDeliveryConfiguration": { + "shape": "CustomDeliveryConfiguration", + "documentation": "

The delivery configuration settings for sending the treatment through a custom channel. This object is required if the MessageConfiguration object for the treatment specifies a CustomMessage object.

" + }, "MessageConfiguration": { "shape": "MessageConfiguration", "documentation": "

The message configuration settings for the treatment.

" @@ -14297,14 +14462,31 @@ }, "TreatmentName": { "shape": "__string", - "documentation": "

A custom name for the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

" + "documentation": "

A custom name for the treatment.

" } }, - "documentation": "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", + "documentation": "

Specifies the settings for a campaign treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

", "required": [ "SizePercent" ] }, + "__EndpointTypesElement": { + "type": "string", + "enum": [ + "PUSH", + "GCM", + "APNS", + "APNS_SANDBOX", + "APNS_VOIP", + "APNS_VOIP_SANDBOX", + "ADM", + "SMS", + "VOICE", + "EMAIL", + "BAIDU", + "CUSTOM" + ] + }, "__boolean": { "type": "boolean" }, @@ -14446,6 +14628,12 @@ "shape": "WriteTreatmentResource" } }, + "ListOf__EndpointTypesElement": { + "type": "list", + "member": { + "shape": "__EndpointTypesElement" + } + }, "ListOf__string": { "type": "list", "member": { diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 4511b6bed3eb..17b5ecfd2c78 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 6dc99643dcc8..dc1c5a2c5645 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/polly/pom.xml b/services/polly/pom.xml index c8525edee856..6c5b7483821f 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index a3c4055fedf6..314149d34c98 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -1035,6 +1035,7 @@ "Justin", "Karl", "Kendra", + "Kevin", "Kimberly", "Lea", "Liv", diff --git a/services/pom.xml b/services/pom.xml index bec243b37727..dca4b7508e83 
100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -12,14 +12,12 @@ ~ on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ~ express or implied. See the License for the specific language governing ~ permissions and limitations under the License. - --> - - + --> 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT services AWS Java SDK :: Services @@ -241,6 +239,12 @@ kinesisvideosignaling detective codestarconnections + synthetics + iotsitewise + macie2 + codeartifact + honeycode + ivs The AWS Java SDK services https://aws.amazon.com/sdkforjava @@ -291,6 +295,11 @@ software.amazon.awssdk ${awsjavasdk.version}
+ + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + apache-client software.amazon.awssdk diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 13b11369fd48..997a1c168ae8 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 pricing diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 4f7d17551402..d4f646652461 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldb/src/main/resources/codegen-resources/paginators-1.json b/services/qldb/src/main/resources/codegen-resources/paginators-1.json index 696cd5b2bef1..dcebff0861bd 100644 --- a/services/qldb/src/main/resources/codegen-resources/paginators-1.json +++ b/services/qldb/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,10 @@ { "pagination": { + "ListJournalKinesisStreamsForLedger": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListJournalS3Exports": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/qldb/src/main/resources/codegen-resources/service-2.json b/services/qldb/src/main/resources/codegen-resources/service-2.json index ab53e53a15ac..e60515dbb911 100644 --- a/services/qldb/src/main/resources/codegen-resources/service-2.json +++ b/services/qldb/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,21 @@ "uid":"qldb-2019-01-02" }, "operations":{ + "CancelJournalKinesisStream":{ + "name":"CancelJournalKinesisStream", + "http":{ + "method":"DELETE", + "requestUri":"/ledgers/{name}/journal-kinesis-streams/{streamId}" + }, + "input":{"shape":"CancelJournalKinesisStreamRequest"}, + "output":{"shape":"CancelJournalKinesisStreamResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + 
{"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Ends a given Amazon QLDB journal stream. Before a stream can be canceled, its current status must be ACTIVE.

You can't restart a stream after you cancel it. Canceled QLDB stream resources are subject to a 7-day retention period, so they are automatically deleted after this limit expires.

" + }, "CreateLedger":{ "name":"CreateLedger", "http":{ @@ -44,6 +59,21 @@ ], "documentation":"

Deletes a ledger and all of its contents. This action is irreversible.

If deletion protection is enabled, you must first disable it before you can delete the ledger using the QLDB API or the AWS Command Line Interface (AWS CLI). You can disable it by calling the UpdateLedger operation to set the flag to false. The QLDB console disables deletion protection for you when you use it to delete a ledger.

" }, + "DescribeJournalKinesisStream":{ + "name":"DescribeJournalKinesisStream", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-kinesis-streams/{streamId}" + }, + "input":{"shape":"DescribeJournalKinesisStreamRequest"}, + "output":{"shape":"DescribeJournalKinesisStreamResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Returns detailed information about a given Amazon QLDB journal stream. The output includes the Amazon Resource Name (ARN), stream name, current status, creation time, and the parameters of your original stream creation request.

" + }, "DescribeJournalS3Export":{ "name":"DescribeJournalS3Export", "http":{ @@ -55,7 +85,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

" + "documentation":"

Returns information about a journal export job, including the ledger name, export ID, when it was created, current status, and its start and end time export parameters.

This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException.

If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException.

" }, "DescribeLedger":{ "name":"DescribeLedger", @@ -98,7 +128,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourcePreconditionNotMetException"} ], - "documentation":"

Returns a journal block object at a specified address in a ledger. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

" + "documentation":"

Returns a block object at a specified address in a journal. Also returns a proof of the specified block for verification if DigestTipAddress is provided.

For information about the data contents in a block, see Journal contents in the Amazon QLDB Developer Guide.

If the specified ledger doesn't exist or is in DELETING status, then throws ResourceNotFoundException.

If the specified ledger is in CREATING status, then throws ResourcePreconditionNotMetException.

If no block exists with the specified address, then throws InvalidParameterException.

" }, "GetDigest":{ "name":"GetDigest", @@ -130,6 +160,21 @@ ], "documentation":"

Returns a revision data object for a specified document ID and block address. Also returns a proof of the specified revision for verification if DigestTipAddress is provided.

" }, + "ListJournalKinesisStreamsForLedger":{ + "name":"ListJournalKinesisStreamsForLedger", + "http":{ + "method":"GET", + "requestUri":"/ledgers/{name}/journal-kinesis-streams" + }, + "input":{"shape":"ListJournalKinesisStreamsForLedgerRequest"}, + "output":{"shape":"ListJournalKinesisStreamsForLedgerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Returns an array of all Amazon QLDB journal stream descriptors for a given ledger. The output of each stream descriptor includes the same details that are returned by DescribeJournalKinesisStream.

This action returns a maximum of MaxResults items. It is paginated so that you can retrieve all the items by calling ListJournalKinesisStreamsForLedger multiple times.

" + }, "ListJournalS3Exports":{ "name":"ListJournalS3Exports", "http":{ @@ -138,7 +183,7 @@ }, "input":{"shape":"ListJournalS3ExportsRequest"}, "output":{"shape":"ListJournalS3ExportsResponse"}, - "documentation":"

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

" + "documentation":"

Returns an array of journal export job descriptions for all ledgers that are associated with the current AWS account and Region.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3Exports multiple times.

This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

" }, "ListJournalS3ExportsForLedger":{ "name":"ListJournalS3ExportsForLedger", @@ -148,7 +193,7 @@ }, "input":{"shape":"ListJournalS3ExportsForLedgerRequest"}, "output":{"shape":"ListJournalS3ExportsForLedgerResponse"}, - "documentation":"

Returns an array of journal export job descriptions for a specified ledger.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

" + "documentation":"

Returns an array of journal export job descriptions for a specified ledger.

This action returns a maximum of MaxResults items, and is paginated so that you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple times.

This action does not return any expired export jobs. For more information, see Export Job Expiration in the Amazon QLDB Developer Guide.

" }, "ListLedgers":{ "name":"ListLedgers", @@ -174,6 +219,21 @@ ], "documentation":"

Returns all tags for a specified Amazon QLDB resource.

" }, + "StreamJournalToKinesis":{ + "name":"StreamJournalToKinesis", + "http":{ + "method":"POST", + "requestUri":"/ledgers/{name}/journal-kinesis-streams" + }, + "input":{"shape":"StreamJournalToKinesisRequest"}, + "output":{"shape":"StreamJournalToKinesisResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourcePreconditionNotMetException"} + ], + "documentation":"

Creates a journal stream for a given Amazon QLDB ledger. The stream captures every document revision that is committed to the ledger's journal and delivers the data to a specified Amazon Kinesis Data Streams resource.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -223,6 +283,37 @@ "max":1600, "min":20 }, + "Boolean":{"type":"boolean"}, + "CancelJournalKinesisStreamRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "StreamId" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each QLDB journal stream.

", + "location":"uri", + "locationName":"streamId" + } + } + }, + "CancelJournalKinesisStreamResponse":{ + "type":"structure", + "members":{ + "StreamId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each QLDB journal stream.

" + } + } + }, "CreateLedgerRequest":{ "type":"structure", "required":[ @@ -232,7 +323,7 @@ "members":{ "Name":{ "shape":"LedgerName", - "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

" + "documentation":"

The name of the ledger that you want to create. The name must be unique among all of your ledgers in the current AWS Region.

Naming constraints for ledger names are defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" }, "Tags":{ "shape":"Tags", @@ -286,6 +377,36 @@ } }, "DeletionProtection":{"type":"boolean"}, + "DescribeJournalKinesisStreamRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "StreamId" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each QLDB journal stream.

", + "location":"uri", + "locationName":"streamId" + } + } + }, + "DescribeJournalKinesisStreamResponse":{ + "type":"structure", + "members":{ + "Stream":{ + "shape":"JournalKinesisStreamDescription", + "documentation":"

Information about the QLDB journal stream returned by a DescribeJournalKinesisStream request.

" + } + } + }, "DescribeJournalS3ExportRequest":{ "type":"structure", "required":[ @@ -359,6 +480,13 @@ "max":32, "min":32 }, + "ErrorCause":{ + "type":"string", + "enum":[ + "KINESIS_STREAM_NOT_FOUND", + "IAM_PERMISSION_REVOKED" + ] + }, "ErrorMessage":{"type":"string"}, "ExportJournalToS3Request":{ "type":"structure", @@ -539,6 +667,68 @@ "min":1, "sensitive":true }, + "JournalKinesisStreamDescription":{ + "type":"structure", + "required":[ + "LedgerName", + "RoleArn", + "StreamId", + "Status", + "KinesisConfiguration", + "StreamName" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time, in epoch time format, when the QLDB journal stream was created. (Epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.)

" + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

The inclusive start date and time from which to start streaming journal data.

" + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

The exclusive date and time that specifies when the stream ends. If this parameter is blank, the stream runs indefinitely until you cancel it.

" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.

" + }, + "StreamId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each QLDB journal stream.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the QLDB journal stream.

" + }, + "Status":{ + "shape":"StreamStatus", + "documentation":"

The current state of the QLDB journal stream.

" + }, + "KinesisConfiguration":{ + "shape":"KinesisConfiguration", + "documentation":"

The configuration settings of the Amazon Kinesis Data Streams destination for your QLDB journal stream.

" + }, + "ErrorCause":{ + "shape":"ErrorCause", + "documentation":"

The error message that describes the reason that a stream has a status of IMPAIRED or FAILED. This is not applicable to streams that have other status values.

" + }, + "StreamName":{ + "shape":"StreamName", + "documentation":"

The user-defined name of the QLDB journal stream.

" + } + }, + "documentation":"

The information about an Amazon QLDB journal stream, including the Amazon Resource Name (ARN), stream name, creation time, current status, and the parameters of your original stream creation request.

" + }, + "JournalKinesisStreamDescriptionList":{ + "type":"list", + "member":{"shape":"JournalKinesisStreamDescription"} + }, "JournalS3ExportDescription":{ "type":"structure", "required":[ @@ -588,6 +778,21 @@ "type":"list", "member":{"shape":"JournalS3ExportDescription"} }, + "KinesisConfiguration":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Kinesis data stream resource.

" + }, + "AggregationEnabled":{ + "shape":"Boolean", + "documentation":"

Enables QLDB to publish multiple data records in a single Kinesis Data Streams record. To learn more, see KPL Key Concepts in the Amazon Kinesis Data Streams Developer Guide.

" + } + }, + "documentation":"

The configuration settings of the Amazon Kinesis Data Streams destination for your Amazon QLDB journal stream.

" + }, "LedgerList":{ "type":"list", "member":{"shape":"LedgerSummary"} @@ -638,6 +843,43 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ListJournalKinesisStreamsForLedgerRequest":{ + "type":"structure", + "required":["LedgerName"], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single ListJournalKinesisStreamsForLedger request. (The actual number of results returned might be fewer.)

", + "location":"querystring", + "locationName":"max_results" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token, indicating that you want to retrieve the next page of results. If you received a value for NextToken in the response from a previous ListJournalKinesisStreamsForLedger call, you should use that value as input here.

", + "location":"querystring", + "locationName":"next_token" + } + } + }, + "ListJournalKinesisStreamsForLedgerResponse":{ + "type":"structure", + "members":{ + "Streams":{ + "shape":"JournalKinesisStreamDescriptionList", + "documentation":"

The array of QLDB journal stream descriptors that are associated with the given ledger.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"
  • If NextToken is empty, the last page of results has been processed and there are no more results to be retrieved.

  • If NextToken is not empty, more results are available. To retrieve the next page of results, use the value of NextToken in a subsequent ListJournalKinesisStreamsForLedger call.

" + } + } + }, "ListJournalS3ExportsForLedgerRequest":{ "type":"structure", "required":["Name"], @@ -858,7 +1100,7 @@ }, "KmsKeyArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for a customer master key (CMK) in AWS Key Management Service (AWS KMS).

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

" + "documentation":"

The Amazon Resource Name (ARN) for a symmetric customer master key (CMK) in AWS Key Management Service (AWS KMS). Amazon QLDB does not support asymmetric CMKs.

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

" } }, "documentation":"

The encryption settings that are used by a journal export job to write data in an Amazon Simple Storage Service (Amazon S3) bucket.

" @@ -899,6 +1141,73 @@ "max":128, "min":0 }, + "StreamJournalToKinesisRequest":{ + "type":"structure", + "required":[ + "LedgerName", + "RoleArn", + "InclusiveStartTime", + "KinesisConfiguration", + "StreamName" + ], + "members":{ + "LedgerName":{ + "shape":"LedgerName", + "documentation":"

The name of the ledger.

", + "location":"uri", + "locationName":"name" + }, + "RoleArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.

" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

The key-value pairs to add as tags to the stream that you want to create. Tag keys are case sensitive. Tag values are case sensitive and can be null.

" + }, + "InclusiveStartTime":{ + "shape":"Timestamp", + "documentation":"

The inclusive start date and time from which to start streaming journal data. This parameter must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

The InclusiveStartTime cannot be in the future and must be before ExclusiveEndTime.

If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, QLDB effectively defaults it to the ledger's CreationDateTime.

" + }, + "ExclusiveEndTime":{ + "shape":"Timestamp", + "documentation":"

The exclusive date and time that specifies when the stream ends. If you don't define this parameter, the stream runs indefinitely until you cancel it.

The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z

" + }, + "KinesisConfiguration":{ + "shape":"KinesisConfiguration", + "documentation":"

The configuration settings of the Kinesis Data Streams destination for your stream request.

" + }, + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name that you want to assign to the QLDB journal stream. User-defined names can help identify and indicate the purpose of a stream.

Your stream name must be unique among other active streams for a given ledger. Stream names have the same naming constraints as ledger names, as defined in Quotas in Amazon QLDB in the Amazon QLDB Developer Guide.

" + } + } + }, + "StreamJournalToKinesisResponse":{ + "type":"structure", + "members":{ + "StreamId":{ + "shape":"UniqueId", + "documentation":"

The unique ID that QLDB assigns to each QLDB journal stream.

" + } + } + }, + "StreamName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?!^.*--)(?!^[0-9]+$)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "COMPLETED", + "CANCELED", + "FAILED", + "IMPAIRED" + ] + }, "TagKey":{ "type":"string", "max":128, @@ -1028,7 +1337,7 @@ "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" } }, - "documentation":"

A structure that can contain an Amazon Ion value in multiple encoding formats.

", + "documentation":"

A structure that can contain a value in multiple encoding formats.

", "sensitive":true } }, diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 77c90abc484f..e2f81460372f 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/qldbsession/src/main/resources/codegen-resources/service-2.json b/services/qldbsession/src/main/resources/codegen-resources/service-2.json index 4cbf48b66054..44b166288052 100644 --- a/services/qldbsession/src/main/resources/codegen-resources/service-2.json +++ b/services/qldbsession/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"RateExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Sends a command to an Amazon QLDB ledger.

" + "documentation":"

Sends a command to an Amazon QLDB ledger.

Instead of interacting directly with this API, we recommend that you use the Amazon QLDB Driver or the QLDB Shell to execute data transactions on a ledger.

  • If you are working with an AWS SDK, use the QLDB Driver. The driver provides a high-level abstraction layer above this qldbsession data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide.

  • If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB Shell. The shell is a command line interface that uses the QLDB Driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB Shell.

" } }, "shapes":{ @@ -64,7 +64,7 @@ "members":{ "TransactionId":{ "shape":"TransactionId", - "documentation":"

Specifies the transaction id of the transaction to commit.

" + "documentation":"

Specifies the transaction ID of the transaction to commit.

" }, "CommitDigest":{ "shape":"CommitDigest", @@ -78,7 +78,7 @@ "members":{ "TransactionId":{ "shape":"TransactionId", - "documentation":"

The transaction id of the committed transaction.

" + "documentation":"

The transaction ID of the committed transaction.

" }, "CommitDigest":{ "shape":"CommitDigest", @@ -110,7 +110,7 @@ "members":{ "TransactionId":{ "shape":"TransactionId", - "documentation":"

Specifies the transaction id of the request.

" + "documentation":"

Specifies the transaction ID of the request.

" }, "Statement":{ "shape":"Statement", @@ -142,7 +142,7 @@ "members":{ "TransactionId":{ "shape":"TransactionId", - "documentation":"

Specifies the transaction id of the page to be fetched.

" + "documentation":"

Specifies the transaction ID of the page to be fetched.

" }, "NextPageToken":{ "shape":"PageToken", @@ -167,7 +167,7 @@ "Message":{"shape":"ErrorMessage"}, "Code":{"shape":"ErrorCode"} }, - "documentation":"

Returned if the session doesn't exist anymore because it timed-out or expired.

", + "documentation":"

Returned if the session doesn't exist anymore because it timed out or expired.

", "exception":true }, "IonBinary":{ @@ -199,7 +199,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Returned when a transaction cannot be written to the journal due to a failure in the verification phase of Optimistic Concurrency Control.

", + "documentation":"

Returned when a transaction cannot be written to the journal due to a failure in the verification phase of optimistic concurrency control (OCC).

", "exception":true }, "Page":{ @@ -315,7 +315,7 @@ "documentation":"

The name of the ledger to start a new session against.

" } }, - "documentation":"

Specifies a request to start a a new session.

" + "documentation":"

Specifies a request to start a new session.

" }, "StartSessionResult":{ "type":"structure", @@ -338,7 +338,7 @@ "members":{ "TransactionId":{ "shape":"TransactionId", - "documentation":"

The transaction id of the started transaction.

" + "documentation":"

The transaction ID of the started transaction.

" } }, "documentation":"

Contains the details of the started transaction.

" @@ -363,19 +363,19 @@ "members":{ "IonBinary":{ "shape":"IonBinary", - "documentation":"

An Amazon Ion binary value contained in a ValueHolder structure.

" + "documentation":"

An Amazon Ion binary value contained in a ValueHolder structure.

" }, "IonText":{ "shape":"IonText", - "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" + "documentation":"

An Amazon Ion plaintext value contained in a ValueHolder structure.

" } }, - "documentation":"

A structure that can contains values in multiple encoding formats.

" + "documentation":"

A structure that can contain an Amazon Ion value in multiple encoding formats.

" }, "ValueHolders":{ "type":"list", "member":{"shape":"ValueHolder"} } }, - "documentation":"

The transactional data APIs for Amazon QLDB

" + "documentation":"

The transactional data APIs for Amazon QLDB

Instead of interacting directly with this API, we recommend that you use the Amazon QLDB Driver or the QLDB Shell to execute data transactions on a ledger.

  • If you are working with an AWS SDK, use the QLDB Driver. The driver provides a high-level abstraction layer above this qldbsession data plane and manages SendCommand API calls for you. For information and a list of supported programming languages, see Getting started with the driver in the Amazon QLDB Developer Guide.

  • If you are working with the AWS Command Line Interface (AWS CLI), use the QLDB Shell. The shell is a command line interface that uses the QLDB Driver to interact with a ledger. For information, see Accessing Amazon QLDB using the QLDB Shell.

" } diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 952dcf8dcf38..c7a51bc23a0a 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index c7fdcb203178..58f35b527fd5 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.

A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. The CreateDashboard, DescribeDashboard, and ListDashboardsByUser API operations act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.

" + "documentation":"

Creates a dashboard from a template. To first create a template, see the CreateTemplate API operation.

A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. You can share QuickSight dashboards. With the right permissions, you can create scheduled email reports from them. The CreateDashboard, DescribeDashboard, and ListDashboardsByUser API operations act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.

" }, "CreateDataSet":{ "name":"CreateDataSet", @@ -165,7 +165,7 @@ {"shape":"ResourceExistsException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Creates and starts a new SPICE ingestion on a dataset

Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the AWS Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

" + "documentation":"

Creates and starts a new SPICE ingestion on a dataset.

Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? in the AWS Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

" }, "CreateTemplate":{ "name":"CreateTemplate", @@ -183,6 +183,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"ConflictException"}, {"shape":"InternalFailureException"} ], "documentation":"

Creates a template from an existing QuickSight analysis or template. You can use the resulting template to create a dashboard.

A template is an entity in QuickSight that encapsulates the metadata required to create an analysis and that you can use to create a dashboard. A template adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

" @@ -202,10 +203,51 @@ {"shape":"ResourceExistsException"}, {"shape":"LimitExceededException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"ConflictException"}, {"shape":"InternalFailureException"} ], "documentation":"

Creates a template alias for a template.

" }, + "CreateTheme":{ + "name":"CreateTheme", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}" + }, + "input":{"shape":"CreateThemeRequest"}, + "output":{"shape":"CreateThemeResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Creates a theme.

A theme is set of configuration options for color and layout. Themes apply to analyses and dashboards. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.

 A theme is a set of configuration options for color and layout. Themes apply to analyses and dashboards. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.
 " + }, + "CreateThemeAlias":{ + "name":"CreateThemeAlias", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}" + }, + "input":{"shape":"CreateThemeAliasRequest"}, + "output":{"shape":"CreateThemeAliasResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Creates a theme alias for a theme.

" + }, "DeleteDashboard":{ "name":"DeleteDashboard", "http":{ @@ -346,10 +388,48 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"UnsupportedUserEditionException"}, + {"shape":"ConflictException"}, {"shape":"InternalFailureException"} ], "documentation":"

Deletes the item that the specified template alias points to. If you provide a specific alias, you delete the version of the template that the alias points to.

" }, + "DeleteTheme":{ + "name":"DeleteTheme", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}" + }, + "input":{"shape":"DeleteThemeRequest"}, + "output":{"shape":"DeleteThemeResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a theme.

" + }, + "DeleteThemeAlias":{ + "name":"DeleteThemeAlias", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}" + }, + "input":{"shape":"DeleteThemeAliasRequest"}, + "output":{"shape":"DeleteThemeAliasResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes the version of the theme that the specified theme alias points to. If you provide a specific alias, you delete the version of the theme that the alias points to.

" + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -598,6 +678,61 @@ ], "documentation":"

Describes read and write permissions on a template.

" }, + "DescribeTheme":{ + "name":"DescribeTheme", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}" + }, + "input":{"shape":"DescribeThemeRequest"}, + "output":{"shape":"DescribeThemeResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Describes a theme.

" + }, + "DescribeThemeAlias":{ + "name":"DescribeThemeAlias", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}" + }, + "input":{"shape":"DescribeThemeAliasRequest"}, + "output":{"shape":"DescribeThemeAliasResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Describes the alias for a theme.

" + }, + "DescribeThemePermissions":{ + "name":"DescribeThemePermissions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/permissions" + }, + "input":{"shape":"DescribeThemePermissionsRequest"}, + "output":{"shape":"DescribeThemePermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Describes the read and write permissions for a theme.

" + }, "DescribeUser":{ "name":"DescribeUser", "http":{ @@ -637,7 +772,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Generates a server-side embeddable URL and authorization code. For this process to work properly, first configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards in the Amazon QuickSight API Reference.

Currently, you can use GetDashboardEmbedURL only from the server, not from the user’s browser.

" + "documentation":"

Generates a URL and authorization code that you can embed in your web server code. Before you use this command, make sure that you have configured the dashboards and permissions.

Currently, you can use GetDashboardEmbedURL only from the server, not from the user's browser. The following rules apply to the combination of URL and authorization code:

  • They must be used together.

  • They can be used one time only.

  • They are valid for 5 minutes after you run this command.

  • The resulting user session is valid for 10 hours.

For more information, see Embedding Amazon QuickSight Dashboards in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards in the Amazon QuickSight API Reference.

" }, "ListDashboardVersions":{ "name":"ListDashboardVersions", @@ -829,6 +964,7 @@ "input":{"shape":"ListTemplateAliasesRequest"}, "output":{"shape":"ListTemplateAliasesResponse"}, "errors":[ + {"shape":"InvalidNextTokenException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"UnsupportedUserEditionException"}, @@ -872,6 +1008,63 @@ ], "documentation":"

Lists all the templates in the current Amazon QuickSight account.

" }, + "ListThemeAliases":{ + "name":"ListThemeAliases", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/aliases" + }, + "input":{"shape":"ListThemeAliasesRequest"}, + "output":{"shape":"ListThemeAliasesResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all the aliases of a theme.

" + }, + "ListThemeVersions":{ + "name":"ListThemeVersions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/versions" + }, + "input":{"shape":"ListThemeVersionsRequest"}, + "output":{"shape":"ListThemeVersionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all the versions of the specified theme in the current AWS account.

" + }, + "ListThemes":{ + "name":"ListThemes", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/themes" + }, + "input":{"shape":"ListThemesRequest"}, + "output":{"shape":"ListThemesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all the themes in the current AWS account.

" + }, "ListUserGroups":{ "name":"ListUserGroups", "http":{ @@ -1183,6 +1376,6 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], "documentation":"

Updates the template alias of a template.

" @@ -1205,6 +1399,63 @@ ], "documentation":"

Updates the resource permissions for a template.

" }, + "UpdateTheme":{ + "name":"UpdateTheme", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}" + }, + "input":{"shape":"UpdateThemeRequest"}, + "output":{"shape":"UpdateThemeResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates a theme.

" + }, + "UpdateThemeAlias":{ + "name":"UpdateThemeAlias", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}" + }, + "input":{"shape":"UpdateThemeAliasRequest"}, + "output":{"shape":"UpdateThemeAliasResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates an alias of a theme.

" + }, + "UpdateThemePermissions":{ + "name":"UpdateThemePermissions", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/themes/{ThemeId}/permissions" + }, + "input":{"shape":"UpdateThemePermissionsRequest"}, + "output":{"shape":"UpdateThemePermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Updates the resource permissions for a theme. Permissions apply to the actions that you can grant or revoke on a theme, for example \"quicksight:DescribeTheme\".

Theme permissions apply in groupings. Valid groupings include the following for the three levels of permissions, which are user, owner, or no permissions:

  • User

    • \"quicksight:DescribeTheme\"

    • \"quicksight:DescribeThemeAlias\"

    • \"quicksight:ListThemeAliases\"

    • \"quicksight:ListThemeVersions\"

  • Owner

    • \"quicksight:DescribeTheme\"

    • \"quicksight:DescribeThemeAlias\"

    • \"quicksight:ListThemeAliases\"

    • \"quicksight:ListThemeVersions\"

    • \"quicksight:DeleteTheme\"

    • \"quicksight:UpdateTheme\"

    • \"quicksight:CreateThemeAlias\"

    • \"quicksight:DeleteThemeAlias\"

    • \"quicksight:UpdateThemeAlias\"

    • \"quicksight:UpdateThemePermissions\"

    • \"quicksight:DescribeThemePermissions\"

  • To specify no permissions, omit the permissions list.

" + }, "UpdateUser":{ "name":"UpdateUser", "http":{ @@ -1360,6 +1611,10 @@ "min":12, "pattern":"^[0-9]{12}$" }, + "AwsAndAccountId":{ + "type":"string", + "pattern":"^(aws|[0-9]{12})$" + }, "AwsIotAnalyticsParameters":{ "type":"structure", "required":["DataSetName"], @@ -1372,6 +1627,17 @@ "documentation":"

AWS IoT Analytics parameters.

" }, "Boolean":{"type":"boolean"}, + "BorderStyle":{ + "type":"structure", + "members":{ + "Show":{ + "shape":"boolean", + "documentation":"

The option to enable display of borders for visuals.

", + "box":true + } + }, + "documentation":"

The display options for tile borders for visuals.

" + }, "CalculatedColumn":{ "type":"structure", "required":[ @@ -1482,6 +1748,11 @@ "max":64, "min":1 }, + "ColorList":{ + "type":"list", + "member":{"shape":"HexColor"}, + "max":100 + }, "ColumnDataType":{ "type":"string", "enum":[ @@ -1593,7 +1864,7 @@ "documentation":"

A geospatial role for a column.

" } }, - "documentation":"

A tag for a column in a TagColumnOperation structure. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

" + "documentation":"

A tag for a column in a TagColumnOperation structure. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

" }, "ColumnTagList":{ "type":"list", @@ -1624,6 +1895,10 @@ "error":{"httpStatusCode":409}, "exception":true }, + "CopySourceArn":{ + "type":"string", + "pattern":"^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+" + }, "CreateColumnsOperation":{ "type":"structure", "required":["Columns"], @@ -1662,7 +1937,7 @@ }, "Parameters":{ "shape":"Parameters", - "documentation":"

A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters, and some parameters might accept multiple values. You can use the dashboard permissions structure described following to override two string parameters that accept multiple values.

" + "documentation":"

The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values.

" }, "Permissions":{ "shape":"ResourcePermissionList", @@ -1670,7 +1945,7 @@ }, "SourceEntity":{ "shape":"DashboardSourceEntity", - "documentation":"

The source entity from which the dashboard is created. The source entity accepts the Amazon Resource Name (ARN) of the source template or analysis and also references the replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.

If you are creating a dashboard from a source entity in a different AWS account, use the ARN of the source template.

" + "documentation":"

The entity that you are using as a source when you create the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only create a dashboard from a template, so you use a SourceTemplate entity. If you need to create a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplateARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Tags":{ "shape":"TagList", @@ -1682,7 +1957,11 @@ }, "DashboardPublishOptions":{ "shape":"DashboardPublishOptions", - "documentation":"

Options for publishing the dashboard when you create it:

  • AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED by default.

  • AvailabilityStatus for ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual option to export data to .csv format isn't enabled when this is set to DISABLED. This option is ENABLED by default.

  • VisibilityState for SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed by default when set to true. This option is COLLAPSED by default.

" + "documentation":"

Options for publishing the dashboard when you create it:

  • AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED by default.

  • AvailabilityStatus for ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual option to export data to .csv format isn't enabled when this is set to DISABLED. This option is ENABLED by default.

  • VisibilityState for SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. This option is COLLAPSED by default.

" + }, + "ThemeArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that is used in the source entity. The theme ARN must exist in the same AWS account where you create the dashboard.

" } } }, @@ -1691,7 +1970,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the dashboard.

" + "documentation":"

The ARN of the dashboard.

" }, "VersionArn":{ "shape":"Arn", @@ -2190,7 +2469,7 @@ }, "SourceEntity":{ "shape":"TemplateSourceEntity", - "documentation":"

The Amazon Resource Name (ARN) of the source entity from which this template is being created. Currently, you can create a template from an analysis or another template. If the ARN is for an analysis, include its dataset references.

" + "documentation":"

The entity that you are using as a source when you create the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Tags":{ "shape":"TagList", @@ -2232,49 +2511,182 @@ } } }, - "CredentialPair":{ + "CreateThemeAliasRequest":{ "type":"structure", "required":[ - "Username", - "Password" + "AwsAccountId", + "ThemeId", + "AliasName", + "ThemeVersionNumber" ], "members":{ - "Username":{ - "shape":"Username", - "documentation":"

User name.

" + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme for the new theme alias.

", + "location":"uri", + "locationName":"AwsAccountId" }, - "Password":{ - "shape":"Password", - "documentation":"

Password.

" + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID of the theme for which you are creating the alias.

", + "location":"uri", + "locationName":"ThemeId" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The name that you want to give to the theme alias that you are creating. The alias name can't begin with a $. Alias names that start with $ are reserved by Amazon QuickSight.

", + "location":"uri", + "locationName":"AliasName" + }, + "ThemeVersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number of the theme.

" } - }, - "documentation":"

The combination of user name and password that are used as credentials.

" + } }, - "CustomSql":{ + "CreateThemeAliasResponse":{ + "type":"structure", + "members":{ + "ThemeAlias":{ + "shape":"ThemeAlias", + "documentation":"

Information about the theme alias.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "CreateThemeRequest":{ "type":"structure", "required":[ - "DataSourceArn", + "AwsAccountId", + "ThemeId", "Name", - "SqlQuery" + "BaseThemeId", + "Configuration" ], "members":{ - "DataSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the data source.

" + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account where you want to store the new theme.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

An ID for the theme that you want to create. The theme ID is unique per AWS Region in each AWS account.

", + "location":"uri", + "locationName":"ThemeId" }, "Name":{ - "shape":"CustomSqlName", - "documentation":"

A display name for the SQL query result.

" + "shape":"ThemeName", + "documentation":"

A display name for the theme.

" }, - "SqlQuery":{ - "shape":"SqlQuery", - "documentation":"

The SQL query.

" + "BaseThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID of the theme that a custom theme will inherit from. All themes inherit from one of the starting themes defined by Amazon QuickSight. For a list of the starting themes, use ListThemes or choose Themes from within a QuickSight analysis.

" }, - "Columns":{ - "shape":"InputColumnList", - "documentation":"

The column schema from the SQL query result set.

" - } - }, + "VersionDescription":{ + "shape":"VersionDescription", + "documentation":"

A description of the first version of the theme that you're creating. Every time UpdateTheme is called, a new version is created. Each version of the theme has a description of the version in the VersionDescription field.

" + }, + "Configuration":{ + "shape":"ThemeConfiguration", + "documentation":"

The theme configuration, which contains the theme display properties.

" + }, + "Permissions":{ + "shape":"ResourcePermissionList", + "documentation":"

A valid grouping of resource permissions to apply to the new theme.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A map of the key-value pairs for the resource tag or tags that you want to add to the resource.

" + } + } + }, + "CreateThemeResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the theme.

" + }, + "VersionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the new theme.

" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID of the theme.

" + }, + "CreationStatus":{ + "shape":"ResourceStatus", + "documentation":"

The theme creation status.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "CredentialPair":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{ + "shape":"Username", + "documentation":"

User name.

" + }, + "Password":{ + "shape":"Password", + "documentation":"

Password.

" + }, + "AlternateDataSourceParameters":{ + "shape":"DataSourceParametersList", + "documentation":"

A set of alternate data source parameters that you want to share for these credentials. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allowlist. If the structures are an exact match, the request is allowed to use the new data source with the existing credentials. If the AlternateDataSourceParameters list is null, the DataSourceParameters originally used with these Credentials is automatically allowed.

" + } + }, + "documentation":"

The combination of user name and password that are used as credentials.

" + }, + "CustomSql":{ + "type":"structure", + "required":[ + "DataSourceArn", + "Name", + "SqlQuery" + ], + "members":{ + "DataSourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the data source.

" + }, + "Name":{ + "shape":"CustomSqlName", + "documentation":"

A display name for the SQL query result.

" + }, + "SqlQuery":{ + "shape":"SqlQuery", + "documentation":"

The SQL query.

" + }, + "Columns":{ + "shape":"InputColumnList", + "documentation":"

The column schema from the SQL query result set.

" + } + }, "documentation":"

A physical table type built from the results of the custom SQL query.

" }, "CustomSqlName":{ @@ -2295,7 +2707,7 @@ }, "Name":{ "shape":"DashboardName", - "documentation":"

A display name for the dataset.

" + "documentation":"

A display name for the dashboard.

" }, "Version":{ "shape":"DashboardVersion", @@ -2345,6 +2757,8 @@ "DashboardErrorType":{ "type":"string", "enum":[ + "ACCESS_DENIED", + "SOURCE_NOT_FOUND", "DATA_SET_NOT_FOUND", "INTERNAL_FAILURE", "PARAMETER_VALUE_INCOMPATIBLE", @@ -2389,15 +2803,15 @@ "members":{ "Operator":{ "shape":"FilterOperator", - "documentation":"

The comparison operator that you want to use as a filter. For example, \"Operator\": \"StringEquals\".

" + "documentation":"

The comparison operator that you want to use as a filter, for example, \"Operator\": \"StringEquals\".

" }, "Name":{ "shape":"DashboardFilterAttribute", - "documentation":"

The name of the value that you want to use as a filter. For example, \"Name\": \"QUICKSIGHT_USER\".

" + "documentation":"

The name of the value that you want to use as a filter, for example, \"Name\": \"QUICKSIGHT_USER\".

" }, "Value":{ "shape":"String", - "documentation":"

The value of the named item, in this case QUICKSIGHT_USER, that you want to use as a filter. For example, \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\".

" + "documentation":"

The value of the named item, in this case QUICKSIGHT_USER, that you want to use as a filter, for example, \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\".

" } }, "documentation":"

A filter that you apply when searching for dashboards.

" @@ -2508,6 +2922,10 @@ "shape":"Arn", "documentation":"

Source entity ARN.

" }, + "DataSetArns":{ + "shape":"DataSetArnsList", + "documentation":"

The Amazon Resource Numbers (ARNs) for the datasets that are associated with a version of the dashboard.

" + }, "Description":{ "shape":"VersionDescription", "documentation":"

Description.

" @@ -2550,6 +2968,24 @@ "member":{"shape":"DashboardVersionSummary"}, "max":100 }, + "DataColorPalette":{ + "type":"structure", + "members":{ + "Colors":{ + "shape":"ColorList", + "documentation":"

The hexadecimal codes for the colors.

" + }, + "MinMaxGradient":{ + "shape":"ColorList", + "documentation":"

The minimum and maximum hexadecimal codes that describe a color gradient.

" + }, + "EmptyFillColor":{ + "shape":"HexColor", + "documentation":"

The hexadecimal code of a color that applies to charts where a lack of data is highlighted.

" + } + }, + "documentation":"

The theme colors that are used for data colors in charts. The colors description is a hexadecimal color code that consists of six alphanumeric characters, prefixed with #, for example #37BFF5.

" + }, "DataSet":{ "type":"structure", "members":{ @@ -2604,6 +3040,11 @@ }, "documentation":"

Dataset.

" }, + "DataSetArnsList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":100 + }, "DataSetConfiguration":{ "type":"structure", "members":{ @@ -2745,6 +3186,10 @@ "shape":"DataSourceParameters", "documentation":"

The parameters that Amazon QuickSight uses to connect to your underlying source. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

" }, + "AlternateDataSourceParameters":{ + "shape":"DataSourceParametersList", + "documentation":"

A set of alternate data source parameters that you want to share for the credentials stored with this data source. The credentials are applied in tandem with the data source parameters when you copy a data source by using a create or update request. The API compares the DataSourceParameters structure that's in the request with the structures in the AlternateDataSourceParameters allowlist. If the structures are an exact match, the request is allowed to use the credentials from this existing data source. If the AlternateDataSourceParameters list is null, the Credentials originally used with this DataSourceParameters are automatically allowed.

" + }, "VpcConnectionProperties":{ "shape":"VpcConnectionProperties", "documentation":"

The VPC connection information. You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.

" @@ -2765,10 +3210,14 @@ "members":{ "CredentialPair":{ "shape":"CredentialPair", - "documentation":"

Credential pair.

" + "documentation":"

Credential pair. For more information, see CredentialPair.

" + }, + "CopySourceArn":{ + "shape":"CopySourceArn", + "documentation":"

The Amazon Resource Name (ARN) of a data source that has the credential pair that you want to use. When CopySourceArn is not null, the credential pair from the data source in the ARN is used as the credentials for the DataSourceCredentials structure.

" } }, - "documentation":"

Data source credentials.

", + "documentation":"

Data source credentials. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

", "sensitive":true }, "DataSourceErrorInfo":{ @@ -2788,6 +3237,8 @@ "DataSourceErrorInfoType":{ "type":"string", "enum":[ + "ACCESS_DENIED", + "COPY_SOURCE_NOT_FOUND", "TIMEOUT", "ENGINE_VERSION_NOT_SUPPORTED", "UNKNOWN_HOST", @@ -2882,6 +3333,12 @@ }, "documentation":"

The parameters that Amazon QuickSight uses to connect to your underlying data source. This is a variant type structure. For this structure to be valid, only one of the attributes can be non-null.

" }, + "DataSourceParametersList":{ + "type":"list", + "member":{"shape":"DataSourceParameters"}, + "max":50, + "min":1 + }, "DataSourceType":{ "type":"string", "enum":[ @@ -3253,7 +3710,7 @@ }, "AliasName":{ "shape":"AliasName", - "documentation":"

The name for the template alias. If you name a specific alias, you delete the version that the alias points to. You can specify the latest version of the template by providing the keyword $LATEST in the AliasName parameter.

", + "documentation":"

The name for the template alias. To delete a specific alias, you delete the version that the alias points to. You can specify the alias name, or specify the latest version of the template by providing the keyword $LATEST in the AliasName parameter.

", "location":"uri", "locationName":"AliasName" } @@ -3277,7 +3734,7 @@ }, "Arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + "documentation":"

The Amazon Resource Name (ARN) of the template you want to delete.

" }, "RequestId":{ "shape":"String", @@ -3334,6 +3791,109 @@ } } }, + "DeleteThemeAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId", + "AliasName" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme alias to delete.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme that the specified alias is for.

", + "location":"uri", + "locationName":"ThemeId" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The unique name for the theme alias to delete.

", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DeleteThemeAliasResponse":{ + "type":"structure", + "members":{ + "AliasName":{ + "shape":"AliasName", + "documentation":"

The name for the theme alias.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme resource using the deleted alias.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

An ID for the theme associated with the deletion.

" + } + } + }, + "DeleteThemeRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme that you're deleting.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

An ID for the theme that you want to delete.

", + "location":"uri", + "locationName":"ThemeId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version of the theme that you want to delete.

Note: If you don't provide a version number, you're using this call to DeleteTheme to delete all versions of the theme.

", + "location":"querystring", + "locationName":"version-number" + } + } + }, + "DeleteThemeResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

An ID for the theme.

" + } + } + }, "DeleteUserByPrincipalIdRequest":{ "type":"structure", "required":[ @@ -3969,43 +4529,91 @@ "shape":"StatusCode", "documentation":"

The HTTP status of the request.

", "location":"statusCode" - } + }, + "RequestId":{"shape":"String"} } }, - "DescribeUserRequest":{ + "DescribeThemeAliasRequest":{ "type":"structure", "required":[ - "UserName", "AwsAccountId", - "Namespace" + "ThemeId", + "AliasName" ], "members":{ - "UserName":{ - "shape":"UserName", - "documentation":"

The name of the user that you want to describe.

", + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme alias that you're describing.

", "location":"uri", - "locationName":"UserName" + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The name of the theme alias that you want to describe.

", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DescribeThemeAliasResponse":{ + "type":"structure", + "members":{ + "ThemeAlias":{ + "shape":"ThemeAlias", + "documentation":"

Information about the theme alias.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "DescribeThemePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], + "members":{ "AwsAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "documentation":"

The ID of the AWS account that contains the theme that you're describing.

", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", - "documentation":"

The namespace. Currently, you should set this to default.

", + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme that you want to describe permissions for.

", "location":"uri", - "locationName":"Namespace" + "locationName":"ThemeId" } } }, - "DescribeUserResponse":{ + "DescribeThemePermissionsResponse":{ "type":"structure", "members":{ - "User":{ - "shape":"User", - "documentation":"

The user name.

" + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

" + }, + "ThemeArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme.

" + }, + "Permissions":{ + "shape":"ResourcePermissionList", + "documentation":"

A list of resource permissions set on the theme.

" }, "RequestId":{ "shape":"String", @@ -4018,22 +4626,119 @@ } } }, - "Domain":{ - "type":"string", - "max":64, - "min":1 - }, - "DomainNotWhitelistedException":{ + "DescribeThemeRequest":{ "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], "members":{ - "Message":{"shape":"String"}, - "RequestId":{ - "shape":"String", - "documentation":"

The AWS request ID for this request.

" - } - }, - "documentation":"

The domain specified isn't on the allow list. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.

", - "error":{"httpStatusCode":403}, + "AwsAccountId":{ + "shape":"AwsAndAccountId", + "documentation":"

The ID of the AWS account that contains the theme that you're describing.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number for the version to describe. If a VersionNumber parameter value isn't provided, the latest version of the theme is described.

", + "location":"querystring", + "locationName":"version-number" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The alias of the theme that you want to describe. If you name a specific alias, you describe the version that the alias points to. You can specify the latest version of the theme by providing the keyword $LATEST in the AliasName parameter. The keyword $PUBLISHED doesn't apply to themes.

", + "location":"querystring", + "locationName":"alias-name" + } + } + }, + "DescribeThemeResponse":{ + "type":"structure", + "members":{ + "Theme":{ + "shape":"Theme", + "documentation":"

The information about the theme that you are describing.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "DescribeUserRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "documentation":"

The name of the user that you want to describe.

", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The namespace. Currently, you should set this to default.

", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeUserResponse":{ + "type":"structure", + "members":{ + "User":{ + "shape":"User", + "documentation":"

The user name.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, + "Domain":{ + "type":"string", + "max":64, + "min":1 + }, + "DomainNotWhitelistedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this request.

" + } + }, + "documentation":"

The domain specified isn't on the allow list. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.

", + "error":{"httpStatusCode":403}, "exception":true }, "Double":{"type":"double"}, @@ -4210,7 +4915,7 @@ "members":{ "EmbedUrl":{ "shape":"EmbeddingUrl", - "documentation":"

An URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code value that enables a single sign-on session.

" + "documentation":"

A single-use URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes. The API provides the URL with an auth_code value that enables one (and only one) sign-on to a user session that is valid for 10 hours.

" }, "Status":{ "shape":"StatusCode", @@ -4283,6 +4988,21 @@ "min":1, "pattern":"[\\u0020-\\u00FF]+" }, + "GutterStyle":{ + "type":"structure", + "members":{ + "Show":{ + "shape":"boolean", + "documentation":"

This Boolean value controls whether to display a gutter space between sheet tiles.

", + "box":true + } + }, + "documentation":"

The display options for gutter spacing between tiles on a sheet.

" + }, + "HexColor":{ + "type":"string", + "pattern":"^#[A-F0-9]{6}$" + }, "Host":{ "type":"string", "max":256, @@ -4316,7 +5036,7 @@ "documentation":"

Assignment status.

" } }, - "documentation":"

An IAM policy assignment.

" + "documentation":"

An AWS Identity and Access Management (IAM) policy assignment.

" }, "IAMPolicyAssignmentName":{ "type":"string", @@ -4768,7 +5488,7 @@ "members":{ "DashboardSummaryList":{ "shape":"DashboardSummaryList", - "documentation":"

A structure that contains all of the dashboards shared with the user. This structure provides basic information about the dashboards.

" + "documentation":"

A structure that contains all of the dashboards in your AWS account. This structure provides basic information about the dashboards.

" }, "NextToken":{ "shape":"String", @@ -5366,6 +6086,171 @@ } } }, + "ListThemeAliasesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme aliases that you're listing.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per request.

", + "box":true, + "location":"querystring", + "locationName":"max-result" + } + } + }, + "ListThemeAliasesResponse":{ + "type":"structure", + "members":{ + "ThemeAliasList":{ + "shape":"ThemeAliasList", + "documentation":"

A structure containing the list of the theme's aliases.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

" + } + } + }, + "ListThemeVersionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the themes that you're listing.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per request.

", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListThemeVersionsResponse":{ + "type":"structure", + "members":{ + "ThemeVersionSummaryList":{ + "shape":"ThemeVersionSummaryList", + "documentation":"

A structure containing a list of all the versions of the specified theme.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "ListThemesRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the themes that you're listing.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned per request.

", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Type":{ + "shape":"ThemeType", + "documentation":"

The type of themes that you want to list. Valid options include the following:

  • ALL (default) - Display all existing themes.

  • CUSTOM - Display only the themes created by people using Amazon QuickSight.

  • QUICKSIGHT - Display only the starting themes defined by QuickSight.

", + "location":"querystring", + "locationName":"type" + } + } + }, + "ListThemesResponse":{ + "type":"structure", + "members":{ + "ThemeSummaryList":{ + "shape":"ThemeSummaryList", + "documentation":"

Information about the themes in the list.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, "ListUserGroupsRequest":{ "type":"structure", "required":[ @@ -5562,6 +6447,17 @@ }, "documentation":"

Amazon S3 manifest file location.

" }, + "MarginStyle":{ + "type":"structure", + "members":{ + "Show":{ + "shape":"boolean", + "documentation":"

This Boolean value controls whether to display sheet margins.

", + "box":true + } + }, + "documentation":"

The display options for margins around the outside edge of sheets.

" + }, "MariaDbParameters":{ "type":"structure", "required":[ @@ -5903,7 +6799,7 @@ }, "SessionName":{ "shape":"RoleSessionName", - "documentation":"

You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role in the AWS CLI Reference.

" + "documentation":"

You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role in the AWS CLI Reference.

" }, "AwsAccountId":{ "shape":"AwsAccountId", @@ -6005,7 +6901,7 @@ "Message":{"shape":"String"}, "ResourceType":{ "shape":"ExceptionResourceType", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The resource type for this request.

" }, "RequestId":{ "shape":"String", @@ -6028,7 +6924,7 @@ "Message":{"shape":"String"}, "ResourceType":{ "shape":"ExceptionResourceType", - "documentation":"

The AWS request ID for this request.

" + "documentation":"

The resource type for this request.

" }, "RequestId":{ "shape":"String", @@ -6048,7 +6944,7 @@ "members":{ "Principal":{ "shape":"Principal", - "documentation":"

The Amazon Resource Name (ARN) of an Amazon QuickSight user or group, or an IAM ARN. If you are using cross-account resource sharing, this is the IAM ARN of an account root. Otherwise, it is the ARN of a QuickSight user or group. .

" + "documentation":"

The Amazon Resource Name (ARN) of the principal. This can be one of the following:

  • The ARN of an Amazon QuickSight user, group, or namespace. (This is most common.)

  • The ARN of an AWS account root: This is an IAM ARN rather than a QuickSight ARN. Use this option only to share resources (templates) across AWS accounts. (This is less common.)

" }, "Actions":{ "shape":"ActionList", @@ -6202,7 +7098,7 @@ }, "Filters":{ "shape":"DashboardSearchFilterList", - "documentation":"

The filters to apply to the search. Currently, you can search only by user name. For example, \"Filters\": [ { \"Name\": \"QUICKSIGHT_USER\", \"Operator\": \"StringEquals\", \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\" } ]

" + "documentation":"

The filters to apply to the search. Currently, you can search only by user name, for example, \"Filters\": [ { \"Name\": \"QUICKSIGHT_USER\", \"Operator\": \"StringEquals\", \"Value\": \"arn:aws:quicksight:us-east-1:1:user/default/UserName1\" } ]

" }, "NextToken":{ "shape":"String", @@ -6275,11 +7171,25 @@ }, "documentation":"

Sheet controls option.

" }, - "SiteBaseUrl":{ - "type":"string", - "max":1024, - "min":1 - }, + "SheetStyle":{ + "type":"structure", + "members":{ + "Tile":{ + "shape":"TileStyle", + "documentation":"

The display options for tiles.

" + }, + "TileLayout":{ + "shape":"TileLayoutStyle", + "documentation":"

The layout options for tiles.

" + } + }, + "documentation":"

The theme display options for sheets.

" + }, + "SiteBaseUrl":{ + "type":"string", + "max":1024, + "min":1 + }, "SnowflakeParameters":{ "type":"structure", "required":[ @@ -6554,6 +7464,7 @@ "TemplateErrorType":{ "type":"string", "enum":[ + "SOURCE_NOT_FOUND", "DATA_SET_NOT_FOUND", "INTERNAL_FAILURE" ] @@ -6681,7 +7592,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The ARN of the template version.

" + "documentation":"

The Amazon Resource Name (ARN) of the template version.

" }, "VersionNumber":{ "shape":"VersionNumber", @@ -6737,6 +7648,218 @@ "SINGLE_QUOTE" ] }, + "Theme":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme.

" + }, + "Name":{ + "shape":"ThemeName", + "documentation":"

The name that the user gives to the theme.

" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The identifier that the user gives to the theme.

" + }, + "Version":{"shape":"ThemeVersion"}, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the theme was created.

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that the theme was last updated.

" + }, + "Type":{ + "shape":"ThemeType", + "documentation":"

The type of theme, based on how it was created. Valid values include: QUICKSIGHT and CUSTOM.

" + } + }, + "documentation":"

" + }, + "ThemeAlias":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme alias.

" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The display name of the theme alias.

" + }, + "ThemeVersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number of the theme alias.

" + } + }, + "documentation":"

An alias for a theme.

" + }, + "ThemeAliasList":{ + "type":"list", + "member":{"shape":"ThemeAlias"}, + "max":100 + }, + "ThemeConfiguration":{ + "type":"structure", + "members":{ + "DataColorPalette":{ + "shape":"DataColorPalette", + "documentation":"

Color properties that apply to chart data colors.

" + }, + "UIColorPalette":{ + "shape":"UIColorPalette", + "documentation":"

Color properties that apply to the UI and to charts, excluding the colors that apply to data.

" + }, + "Sheet":{ + "shape":"SheetStyle", + "documentation":"

Display options related to sheets.

" + } + }, + "documentation":"

The theme configuration. This configuration contains all of the display properties for a theme.

" + }, + "ThemeError":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"ThemeErrorType", + "documentation":"

The type of error.

" + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

The error message.

" + } + }, + "documentation":"

Theme error.

" + }, + "ThemeErrorList":{ + "type":"list", + "member":{"shape":"ThemeError"}, + "min":1 + }, + "ThemeErrorType":{ + "type":"string", + "enum":["INTERNAL_FAILURE"] + }, + "ThemeName":{ + "type":"string", + "max":2048, + "min":1 + }, + "ThemeSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, + "Name":{ + "shape":"ThemeName", + "documentation":"

The display name for the theme.

" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID of the theme. This ID is unique per AWS Region for each AWS account.

" + }, + "LatestVersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The latest version number for the theme.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that this theme was created.

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The last date and time that this theme was updated.

" + } + }, + "documentation":"

The theme summary.

" + }, + "ThemeSummaryList":{ + "type":"list", + "member":{"shape":"ThemeSummary"}, + "max":100 + }, + "ThemeType":{ + "type":"string", + "enum":[ + "QUICKSIGHT", + "CUSTOM", + "ALL" + ] + }, + "ThemeVersion":{ + "type":"structure", + "members":{ + "VersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number of the theme.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

" + }, + "Description":{ + "shape":"VersionDescription", + "documentation":"

The description of the theme.

" + }, + "BaseThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The Amazon QuickSight-defined ID of the theme that a custom theme inherits from. All themes initially inherit from a default QuickSight theme.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that this theme version was created.

" + }, + "Configuration":{ + "shape":"ThemeConfiguration", + "documentation":"

The theme configuration, which contains all the theme display properties.

" + }, + "Errors":{ + "shape":"ThemeErrorList", + "documentation":"

Errors associated with the theme.

" + }, + "Status":{ + "shape":"ResourceStatus", + "documentation":"

The status of the theme version.

" + } + }, + "documentation":"

A version of a theme.

" + }, + "ThemeVersionSummary":{ + "type":"structure", + "members":{ + "VersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number of the theme version.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme version.

" + }, + "Description":{ + "shape":"VersionDescription", + "documentation":"

The description of the theme version.

" + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that this theme version was created.

" + }, + "Status":{ + "shape":"ResourceStatus", + "documentation":"

The status of the theme version.

" + } + }, + "documentation":"

The theme version.

" + }, + "ThemeVersionSummaryList":{ + "type":"list", + "member":{"shape":"ThemeVersionSummary"}, + "max":100 + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -6750,6 +7873,30 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TileLayoutStyle":{ + "type":"structure", + "members":{ + "Gutter":{ + "shape":"GutterStyle", + "documentation":"

The gutter settings that apply between tiles.

" + }, + "Margin":{ + "shape":"MarginStyle", + "documentation":"

The margin settings that apply around the outside edge of sheets.

" + } + }, + "documentation":"

The display options for the layout of tiles on a sheet.

" + }, + "TileStyle":{ + "type":"structure", + "members":{ + "Border":{ + "shape":"BorderStyle", + "documentation":"

The border around a tile.

" + } + }, + "documentation":"

Display options related to tiles on a sheet.

" + }, "Timestamp":{"type":"timestamp"}, "TimestampList":{ "type":"list", @@ -6813,6 +7960,76 @@ "type":"string", "max":32 }, + "UIColorPalette":{ + "type":"structure", + "members":{ + "PrimaryForeground":{ + "shape":"HexColor", + "documentation":"

The color of text and other foreground elements that appear over the primary background regions, such as grid lines, borders, table banding, icons, and so on.

" + }, + "PrimaryBackground":{ + "shape":"HexColor", + "documentation":"

The background color that applies to visuals and other high emphasis UI.

" + }, + "SecondaryForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any sheet title, sheet control text, or UI that appears over the secondary background.

" + }, + "SecondaryBackground":{ + "shape":"HexColor", + "documentation":"

The background color that applies to the sheet background and sheet controls.

" + }, + "Accent":{ + "shape":"HexColor", + "documentation":"

The color that applies to selected states and buttons.

" + }, + "AccentForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the accent color.

" + }, + "Danger":{ + "shape":"HexColor", + "documentation":"

The color that applies to error messages.

" + }, + "DangerForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the error color.

" + }, + "Warning":{ + "shape":"HexColor", + "documentation":"

The color that applies to warning and informational messages.

" + }, + "WarningForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the warning color.

" + }, + "Success":{ + "shape":"HexColor", + "documentation":"

The color that applies to success messages, for example the check mark for a successful download.

" + }, + "SuccessForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the success color.

" + }, + "Dimension":{ + "shape":"HexColor", + "documentation":"

The color that applies to the names of fields that are identified as dimensions.

" + }, + "DimensionForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the dimension color.

" + }, + "Measure":{ + "shape":"HexColor", + "documentation":"

The color that applies to the names of fields that are identified as measures.

" + }, + "MeasureForeground":{ + "shape":"HexColor", + "documentation":"

The foreground color that applies to any text or other elements that appear over the measure color.

" + } + }, + "documentation":"

The theme colors that apply to UI and to charts, excluding data colors. The colors description is a hexadecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5. For more information, see Using Themes in Amazon QuickSight in the Amazon QuickSight User Guide.

" + }, "UnsupportedUserEditionException":{ "type":"structure", "members":{ @@ -6993,11 +8210,11 @@ }, "SourceEntity":{ "shape":"DashboardSourceEntity", - "documentation":"

The template or analysis from which the dashboard is created. The SouceTemplate entity accepts the Amazon Resource Name (ARN) of the template and also references to replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.

" + "documentation":"

The entity that you are using as a source when you update the dashboard. In SourceEntity, you specify the type of object you're using as source. You can only update a dashboard from a template, so you use a SourceTemplate entity. If you need to update a dashboard from an analysis, first convert the analysis to a template by using the CreateTemplate API operation. For SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "Parameters":{ "shape":"Parameters", - "documentation":"

A structure that contains the parameters of the dashboard.

" + "documentation":"

A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters, and some parameters might accept multiple values.

" }, "VersionDescription":{ "shape":"VersionDescription", @@ -7005,7 +8222,11 @@ }, "DashboardPublishOptions":{ "shape":"DashboardPublishOptions", - "documentation":"

Options for publishing the dashboard when you create it:

  • AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED by default.

  • AvailabilityStatus for ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual option to export data to .csv format isn't enabled when this is set to DISABLED. This option is ENABLED by default.

  • VisibilityState for SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed by default when set to true. This option is COLLAPSED by default.

" + "documentation":"

Options for publishing the dashboard when you create it:

  • AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is ENABLED by default.

  • AvailabilityStatus for ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual option to export data to .csv format isn't enabled when this is set to DISABLED. This option is ENABLED by default.

  • VisibilityState for SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. This option is COLLAPSED by default.

" + }, + "ThemeArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If you add a value for this field, it overrides the value that was originally associated with the entity. The theme ARN must exist in the same AWS account where you create the dashboard.

" } } }, @@ -7413,8 +8634,7 @@ "UpdateResourcePermissionList":{ "type":"list", "member":{"shape":"ResourcePermission"}, - "max":100, - "min":1 + "max":100 }, "UpdateTemplateAliasRequest":{ "type":"structure", @@ -7544,7 +8764,7 @@ }, "SourceEntity":{ "shape":"TemplateSourceEntity", - "documentation":"

The source QuickSight entity from which this template is being updated. You can currently update templates from an Analysis or another template.

" + "documentation":"

The entity that you are using as a source when you update the template. In SourceEntity, you specify the type of object you're using as source: SourceTemplate for a template or SourceAnalysis for an analysis. Both of these require an Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source template. For SourceAnalysis, specify the ARN of the source analysis. The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported AWS Region.

Use the DataSetReferences entity within SourceTemplate or SourceAnalysis to list the replacement datasets for the placeholders listed in the original. The schema in each dataset must match its placeholder.

" }, "VersionDescription":{ "shape":"VersionDescription", @@ -7586,6 +8806,180 @@ } } }, + "UpdateThemeAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId", + "AliasName", + "ThemeVersionNumber" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme alias that you're updating.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "AliasName":{ + "shape":"AliasName", + "documentation":"

The name of the theme alias that you want to update.

", + "location":"uri", + "locationName":"AliasName" + }, + "ThemeVersionNumber":{ + "shape":"VersionNumber", + "documentation":"

The version number of the theme that the alias should reference.

" + } + } + }, + "UpdateThemeAliasResponse":{ + "type":"structure", + "members":{ + "ThemeAlias":{ + "shape":"ThemeAlias", + "documentation":"

Information about the theme alias.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, + "UpdateThemePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "GrantPermissions":{ + "shape":"UpdateResourcePermissionList", + "documentation":"

A list of resource permissions to be granted for the theme.

" + }, + "RevokePermissions":{ + "shape":"UpdateResourcePermissionList", + "documentation":"

A list of resource permissions to be revoked from the theme.

" + } + } + }, + "UpdateThemePermissionsResponse":{ + "type":"structure", + "members":{ + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

" + }, + "ThemeArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the theme.

" + }, + "Permissions":{ + "shape":"ResourcePermissionList", + "documentation":"

The resulting list of resource permissions for the theme.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, + "UpdateThemeRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "ThemeId", + "BaseThemeId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the AWS account that contains the theme that you're updating.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

", + "location":"uri", + "locationName":"ThemeId" + }, + "Name":{ + "shape":"ThemeName", + "documentation":"

The name for the theme.

" + }, + "BaseThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The theme ID, defined by Amazon QuickSight, that a custom theme inherits from. All themes initially inherit from a default QuickSight theme.

" + }, + "VersionDescription":{ + "shape":"VersionDescription", + "documentation":"

A description of the theme version that you're updating. Every time that you call UpdateTheme, you create a new version of the theme. Each version of the theme maintains a description of the version in VersionDescription.

" + }, + "Configuration":{ + "shape":"ThemeConfiguration", + "documentation":"

The theme configuration, which contains the theme display properties.

" + } + } + }, + "UpdateThemeResponse":{ + "type":"structure", + "members":{ + "ThemeId":{ + "shape":"RestrictiveResourceId", + "documentation":"

The ID for the theme.

" + }, + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the theme.

" + }, + "VersionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the new version of the theme.

" + }, + "CreationStatus":{ + "shape":"ResourceStatus", + "documentation":"

The creation status of the theme.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The AWS request ID for this operation.

" + } + } + }, "UpdateUserRequest":{ "type":"structure", "required":[ diff --git a/services/ram/pom.xml b/services/ram/pom.xml index ea7e692a7fe9..c4cef4e2db8d 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/ram/src/main/resources/codegen-resources/service-2.json b/services/ram/src/main/resources/codegen-resources/service-2.json index b3b4ce4219ce..3f2223982631 100644 --- a/services/ram/src/main/resources/codegen-resources/service-2.json +++ b/services/ram/src/main/resources/codegen-resources/service-2.json @@ -207,6 +207,7 @@ {"shape":"MalformedArnException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InvalidParameterException"}, + {"shape":"ResourceArnNotFoundException"}, {"shape":"ServerInternalException"}, {"shape":"ServiceUnavailableException"} ], @@ -243,6 +244,7 @@ {"shape":"ResourceShareInvitationArnNotFoundException"}, {"shape":"InvalidMaxResultsException"}, {"shape":"MalformedArnException"}, + {"shape":"UnknownResourceException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InvalidParameterException"}, {"shape":"ServerInternalException"}, @@ -343,6 +345,22 @@ ], "documentation":"

Lists the AWS RAM permissions that are associated with a resource share.

" }, + "ListResourceTypes":{ + "name":"ListResourceTypes", + "http":{ + "method":"POST", + "requestUri":"/listresourcetypes" + }, + "input":{"shape":"ListResourceTypesRequest"}, + "output":{"shape":"ListResourceTypesResponse"}, + "errors":[ + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ServerInternalException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Lists the shareable resource types supported by AWS RAM.

" + }, "ListResources":{ "name":"ListResources", "http":{ @@ -376,7 +394,8 @@ {"shape":"InvalidParameterException"}, {"shape":"MissingRequiredParameterException"}, {"shape":"ServerInternalException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"UnknownResourceException"} ], "documentation":"

Resource shares that were created by attaching a policy to a resource are visible only to the resource share owner, and the resource share cannot be modified in AWS RAM.

Use this API action to promote the resource share. When you promote the resource share, it becomes:

  • Visible to all principals that it is shared with.

  • Modifiable in AWS RAM.

" }, @@ -1058,7 +1077,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster | route53resolver:ResolverRule I resource-groups:Group

" + "documentation":"

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration | resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

" }, "resourceShareArns":{ "shape":"ResourceShareArnList", @@ -1118,6 +1137,32 @@ } } }, + "ListResourceTypesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The token for the next page of results.

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" + } + } + }, + "ListResourceTypesResponse":{ + "type":"structure", + "members":{ + "resourceTypes":{ + "shape":"ServiceNameAndResourceTypeList", + "documentation":"

The shareable resource types supported by AWS RAM.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

" + } + } + }, "ListResourcesRequest":{ "type":"structure", "required":["resourceOwner"], @@ -1132,7 +1177,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster | route53resolver:ResolverRule | resource-groups:Group

" + "documentation":"

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration | resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

" }, "resourceArns":{ "shape":"ResourceArnList", @@ -1696,6 +1741,24 @@ "error":{"httpStatusCode":500}, "exception":true }, + "ServiceNameAndResourceType":{ + "type":"structure", + "members":{ + "resourceType":{ + "shape":"String", + "documentation":"

The shareable resource type.

" + }, + "serviceName":{ + "shape":"String", + "documentation":"

The name of the AWS services to which the resources belong.

" + } + }, + "documentation":"

Information about the shareable resource types and the AWS services to which they belong.

" + }, + "ServiceNameAndResourceTypeList":{ + "type":"list", + "member":{"shape":"ServiceNameAndResourceType"} + }, "ServiceUnavailableException":{ "type":"structure", "required":["message"], diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 081964d70084..d5b42cb3d29d 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CopyDbClusterSnapshotPresignInterceptor.java b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CopyDbClusterSnapshotPresignInterceptor.java new file mode 100644 index 000000000000..9d41d1161626 --- /dev/null +++ b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CopyDbClusterSnapshotPresignInterceptor.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.rds.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.services.rds.model.CopyDbClusterSnapshotRequest; +import software.amazon.awssdk.services.rds.transform.CopyDbClusterSnapshotRequestMarshaller; + +/** + * Handler for pre-signing {@link CopyDbClusterSnapshotRequest}. 
+ */ +@SdkInternalApi +public final class CopyDbClusterSnapshotPresignInterceptor extends RdsPresignInterceptor { + + public static final CopyDbClusterSnapshotRequestMarshaller MARSHALLER = + new CopyDbClusterSnapshotRequestMarshaller(PROTOCOL_FACTORY); + + public CopyDbClusterSnapshotPresignInterceptor() { + super(CopyDbClusterSnapshotRequest.class); + } + + @Override + protected PresignableRequest adaptRequest(final CopyDbClusterSnapshotRequest originalRequest) { + return new PresignableRequest() { + @Override + public String getSourceRegion() { + return originalRequest.sourceRegion(); + } + + @Override + public SdkHttpFullRequest marshall() { + return MARSHALLER.marshall(originalRequest); + } + }; + } +} diff --git a/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CreateDbClusterPresignInterceptor.java b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CreateDbClusterPresignInterceptor.java new file mode 100644 index 000000000000..8ba81f71f2ea --- /dev/null +++ b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/CreateDbClusterPresignInterceptor.java @@ -0,0 +1,50 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.rds.internal; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.services.rds.model.CreateDbClusterRequest; +import software.amazon.awssdk.services.rds.transform.CreateDbClusterRequestMarshaller; + +/** + * Handler for pre-signing {@link CreateDbClusterRequest}. + */ +@SdkInternalApi +public final class CreateDbClusterPresignInterceptor extends RdsPresignInterceptor { + + public static final CreateDbClusterRequestMarshaller MARSHALLER = + new CreateDbClusterRequestMarshaller(PROTOCOL_FACTORY); + + public CreateDbClusterPresignInterceptor() { + super(CreateDbClusterRequest.class); + } + + @Override + protected PresignableRequest adaptRequest(final CreateDbClusterRequest originalRequest) { + return new PresignableRequest() { + @Override + public String getSourceRegion() { + return originalRequest.sourceRegion(); + } + + @Override + public SdkHttpFullRequest marshall() { + return MARSHALLER.marshall(originalRequest); + } + }; + } +} diff --git a/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/RdsPresignInterceptor.java b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/RdsPresignInterceptor.java index 41ec7ca5b8b1..171fcb4abee1 100644 --- a/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/RdsPresignInterceptor.java +++ b/services/rds/src/main/java/software/amazon/awssdk/services/rds/internal/RdsPresignInterceptor.java @@ -21,9 +21,9 @@ import java.time.Clock; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; import software.amazon.awssdk.auth.signer.params.Aws4PresignerParams; import software.amazon.awssdk.awscore.endpoint.DefaultServiceEndpointBuilder; -import software.amazon.awssdk.awscore.util.AwsHostNameUtils; import 
software.amazon.awssdk.core.Protocol; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -103,16 +103,10 @@ public final SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, return request; } - String destinationRegion = - AwsHostNameUtils.parseSigningRegion(request.host(), SERVICE_NAME) - .orElseThrow(() -> new IllegalArgumentException("Could not determine region for " + - request.host())) - .id(); + String destinationRegion = executionAttributes.getAttribute(AwsSignerExecutionAttribute.SIGNING_REGION).id(); URI endpoint = createEndpoint(sourceRegion, SERVICE_NAME); - SdkHttpFullRequest.Builder marshalledRequest = presignableRequest.marshall() - .toBuilder() - .uri(endpoint); + SdkHttpFullRequest.Builder marshalledRequest = presignableRequest.marshall().toBuilder().uri(endpoint); SdkHttpFullRequest requestToPresign = marshalledRequest.method(SdkHttpMethod.GET) diff --git a/services/rds/src/main/resources/codegen-resources/customization.config b/services/rds/src/main/resources/codegen-resources/customization.config index ca9d9c4d21aa..2faf7d72ba5c 100644 --- a/services/rds/src/main/resources/codegen-resources/customization.config +++ b/services/rds/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ // This is for enabling automatic request presigning only; it should not be marshalled "SourceRegion" : { "shape" : "String", - "documentation" : "The region where the source snapshot is located." + "documentation" : "If PreSignedUrl is not specified, this is the region where the source snapshot is located. A PreSignedUrl will be generated automatically by the SDK." } } ] @@ -17,12 +17,33 @@ // This is for enabling automatic request presigning only; it should not be marshalled "SourceRegion" : { "shape" : "String", - "documentation" : "The region where the source instance is located." 
+ "documentation" : "If PreSignedUrl is not specified, this is the region where the source snapshot is located. A PreSignedUrl will be generated automatically by the SDK." + } + } + ] + }, + "CopyDBClusterSnapshotMessage" : { + "inject" : [ + { + // This is for enabling automatic request presigning only; it should not be marshalled + "SourceRegion" : { + "shape" : "String", + "documentation" : "If PreSignedUrl is not specified, this is the region where the source snapshot is located. A PreSignedUrl will be generated automatically by the SDK." + } + } + ] + }, + "CreateDBClusterMessage" : { + "inject" : [ + { + // This is for enabling automatic request presigning only; it should not be marshalled + "SourceRegion" : { + "shape" : "String", + "documentation" : "If PreSignedUrl is not specified, this is the region where the source snapshot is located. A PreSignedUrl will be generated automatically by the SDK." } } ] } - }, "blacklistedSimpleMethods" : ["failoverDBCluster"], "deprecatedShapes" : [ diff --git a/services/rds/src/main/resources/codegen-resources/paginators-1.json b/services/rds/src/main/resources/codegen-resources/paginators-1.json index 7fc21f579564..e4a45469e4c9 100755 --- a/services/rds/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rds/src/main/resources/codegen-resources/paginators-1.json @@ -1,11 +1,47 @@ { "pagination": { + "DescribeCertificates": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Certificates" + }, "DescribeCustomAvailabilityZones": { "input_token": "Marker", "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "CustomAvailabilityZones" }, + "DescribeDBClusterBacktracks": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterBacktracks" + }, + "DescribeDBClusterEndpoints": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": 
"DBClusterEndpoints" + }, + "DescribeDBClusterParameterGroups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterParameterGroups" + }, + "DescribeDBClusterParameters": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Parameters" + }, + "DescribeDBClusterSnapshots": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterSnapshots" + }, "DescribeDBClusters": { "input_token": "Marker", "limit_key": "MaxRecords", @@ -138,6 +174,12 @@ "output_token": "Marker", "result_key": "OrderableDBInstanceOptions" }, + "DescribePendingMaintenanceActions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "PendingMaintenanceActions" + }, "DescribeReservedDBInstances": { "input_token": "Marker", "limit_key": "MaxRecords", @@ -150,6 +192,12 @@ "output_token": "Marker", "result_key": "ReservedDBInstancesOfferings" }, + "DescribeSourceRegions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "SourceRegions" + }, "DownloadDBLogFilePortion": { "input_token": "Marker", "limit_key": "NumberOfLines", diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index 076c6c692184..0feeb54cd835 100755 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -68,8 +68,10 @@ "input":{"shape":"AddTagsToResourceMessage"}, "errors":[ {"shape":"DBInstanceNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"}, {"shape":"DBSnapshotNotFoundFault"}, - {"shape":"DBClusterNotFoundFault"} + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyTargetGroupNotFoundFault"} ], "documentation":"

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

" }, @@ -125,7 +127,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Backtracks a DB cluster to a specific time, without creating a new DB cluster.

For more information on backtracking, see Backtracking an Aurora DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora MySQL DB clusters.

" }, "CancelExportTask":{ "name":"CancelExportTask", @@ -288,7 +290,7 @@ {"shape":"InvalidGlobalClusterStateFault"}, {"shape":"DomainNotFoundFault"} ], - "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a read replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, "CreateDBClusterEndpoint":{ "name":"CreateDBClusterEndpoint", @@ -414,7 +416,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DomainNotFoundFault"} ], - "documentation":"

Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, Oracle, or PostgreSQL. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. You must call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified following.

Your source DB instance must have backup retention enabled.

" + "documentation":"

Creates a new DB instance that acts as a read replica for an existing source DB instance. You can create a read replica for a DB instance running MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read Replicas in the Amazon RDS User Guide.

Amazon Aurora doesn't support this action. Call the CreateDBInstance action to create a DB instance for an Aurora DB cluster.

All read replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified.

Your source DB instance must have backup retention enabled.

" }, "CreateDBParameterGroup":{ "name":"CreateDBParameterGroup", @@ -449,7 +451,7 @@ {"shape":"DBProxyAlreadyExistsFault"}, {"shape":"DBProxyQuotaExceededFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Creates a new DB proxy.

" + "documentation":"

Creates a new DB proxy.

" }, "CreateDBSecurityGroup":{ "name":"CreateDBSecurityGroup", @@ -547,7 +549,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Creates an Aurora global database spread across multiple regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This action only applies to Aurora DB clusters.

" + "documentation":"

Creates an Aurora global database spread across multiple regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

This action only applies to Aurora DB clusters.

" }, "CreateOptionGroup":{ "name":"CreateOptionGroup", @@ -670,7 +672,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBInstanceAutomatedBackupQuotaExceededFault"} ], - "documentation":"

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true:

  • The DB cluster is a Read Replica of another Amazon Aurora DB cluster.

  • The DB instance is the only instance in the DB cluster.

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.

" + "documentation":"

The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by DeleteDBInstance are not deleted.

If you request a final DB snapshot the status of the Amazon RDS DB instance is deleting until the DB snapshot is created. The API action DescribeDBInstance is used to monitor the status of this operation. The action can't be canceled or reverted once submitted.

When a DB instance is in a failure state and has a status of failed, incompatible-restore, or incompatible-network, you can only delete it when you skip creation of the final snapshot with the SkipFinalSnapshot parameter.

If the specified DB instance is part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of the following conditions are true:

  • The DB cluster is a read replica of another Amazon Aurora DB cluster.

  • The DB instance is the only instance in the DB cluster.

To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no longer a read replica. After the promotion completes, then call the DeleteDBInstance API action to delete the final instance in the DB cluster.

" }, "DeleteDBInstanceAutomatedBackup":{ "name":"DeleteDBInstanceAutomatedBackup", @@ -717,7 +719,7 @@ {"shape":"DBProxyNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Deletes an existing proxy.

" + "documentation":"

Deletes an existing proxy.

" }, "DeleteDBSecurityGroup":{ "name":"DeleteDBSecurityGroup", @@ -843,7 +845,7 @@ {"shape":"DBProxyNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Remove the association between one or more DBProxyTarget data structures and a DBProxyTargetGroup.

" + "documentation":"

Remove the association between one or more DBProxyTarget data structures and a DBProxyTargetGroup.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -905,7 +907,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"DBClusterBacktrackNotFoundFault"} ], - "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora MySQL DB clusters.

" }, "DescribeDBClusterEndpoints":{ "name":"DescribeDBClusterEndpoints", @@ -1110,7 +1112,7 @@ "errors":[ {"shape":"DBProxyNotFoundFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Returns information about DB proxies.

" + "documentation":"

Returns information about DB proxies.

" }, "DescribeDBProxyTargetGroups":{ "name":"DescribeDBProxyTargetGroups", @@ -1128,7 +1130,7 @@ {"shape":"DBProxyTargetGroupNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Returns information about DB proxy target groups, represented by DBProxyTargetGroup data structures.

" + "documentation":"

Returns information about DB proxy target groups, represented by DBProxyTargetGroup data structures.

" }, "DescribeDBProxyTargets":{ "name":"DescribeDBProxyTargets", @@ -1147,7 +1149,7 @@ {"shape":"DBProxyTargetGroupNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Returns information about DBProxyTarget objects. This API supports pagination.

" + "documentation":"

Returns information about DBProxyTarget objects. This API supports pagination.

" }, "DescribeDBSecurityGroups":{ "name":"DescribeDBSecurityGroups", @@ -1430,7 +1432,7 @@ "shape":"SourceRegionMessage", "resultWrapper":"DescribeSourceRegionsResult" }, - "documentation":"

Returns a list of the source AWS Regions where the current AWS Region can create a Read Replica or copy a DB snapshot from. This API action supports pagination.

" + "documentation":"

Returns a list of the source AWS Regions where the current AWS Region can create a read replica or copy a DB snapshot from. This API action supports pagination.

" }, "DescribeValidDBInstanceModifications":{ "name":"DescribeValidDBInstanceModifications", @@ -1515,7 +1517,9 @@ "errors":[ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSnapshotNotFoundFault"}, - {"shape":"DBClusterNotFoundFault"} + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyTargetGroupNotFoundFault"} ], "documentation":"

Lists all tags on an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, @@ -1633,7 +1637,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"SharedSnapshotQuotaExceededFault"} ], - "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot public or private, use the DescribeDBClusterSnapshotAttributes API action.

This action only applies to Aurora DB clusters.

" + "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts.

Don't add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.

If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are returned as values for the restore attribute.

This action only applies to Aurora DB clusters.

" }, "ModifyDBInstance":{ "name":"ModifyDBInstance", @@ -1663,7 +1667,9 @@ {"shape":"AuthorizationNotFoundFault"}, {"shape":"CertificateNotFoundFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"BackupPolicyNotFoundFault"} + {"shape":"BackupPolicyNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"InvalidDBClusterStateFault"} ], "documentation":"

Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

" }, @@ -1700,7 +1706,7 @@ {"shape":"DBProxyAlreadyExistsFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Changes the settings for an existing DB proxy.

" + "documentation":"

Changes the settings for an existing DB proxy.

" }, "ModifyDBProxyTargetGroup":{ "name":"ModifyDBProxyTargetGroup", @@ -1718,7 +1724,7 @@ {"shape":"DBProxyTargetGroupNotFoundFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Modifies the properties of a DBProxyTargetGroup.

" + "documentation":"

Modifies the properties of a DBProxyTargetGroup.

" }, "ModifyDBSnapshot":{ "name":"ModifyDBSnapshot", @@ -1734,7 +1740,7 @@ "errors":[ {"shape":"DBSnapshotNotFoundFault"} ], - "documentation":"

Updates a manual DB snapshot, which can be encrypted or not encrypted, with a new engine version.

Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.

" + "documentation":"

Updates a manual DB snapshot with a new engine version. The snapshot can be encrypted or unencrypted, but not shared or public.

Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.

" }, "ModifyDBSnapshotAttribute":{ "name":"ModifyDBSnapshotAttribute", @@ -1752,7 +1758,7 @@ {"shape":"InvalidDBSnapshotStateFault"}, {"shape":"SharedSnapshotQuotaExceededFault"} ], - "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Uses the value all to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts. If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action.

" + "documentation":"

Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Use the value all to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts.

Don't add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts.

If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as values for the restore attribute.

" }, "ModifyDBSubnetGroup":{ "name":"ModifyDBSubnetGroup", @@ -1844,7 +1850,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"

Promotes a Read Replica DB instance to a standalone DB instance.

  • Backup duration is a function of the amount of changes to the database since the previous backup. If you plan to promote a Read Replica to a standalone instance, we recommend that you enable backups and complete at least one backup prior to promotion. In addition, a Read Replica cannot be promoted to a standalone instance when it is in the backing-up status. If you have enabled backups on your Read Replica, configure the automated backup window so that daily backups do not interfere with Read Replica promotion.

  • This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.

" + "documentation":"

Promotes a read replica DB instance to a standalone DB instance.

  • Backup duration is a function of the amount of changes to the database since the previous backup. If you plan to promote a read replica to a standalone instance, we recommend that you enable backups and complete at least one backup prior to promotion. In addition, a read replica cannot be promoted to a standalone instance when it is in the backing-up status. If you have enabled backups on your read replica, configure the automated backup window so that daily backups do not interfere with read replica promotion.

  • This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.

" }, "PromoteReadReplicaDBCluster":{ "name":"PromoteReadReplicaDBCluster", @@ -1861,7 +1867,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"} ], - "documentation":"

Promotes a Read Replica DB cluster to a standalone DB cluster.

This action only applies to Aurora DB clusters.

" + "documentation":"

Promotes a read replica DB cluster to a standalone DB cluster.

This action only applies to Aurora DB clusters.

" }, "PurchaseReservedDBInstancesOffering":{ "name":"PurchaseReservedDBInstancesOffering", @@ -1919,7 +1925,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBProxyStateFault"} ], - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Associate one or more DBProxyTarget data structures with a DBProxyTargetGroup.

" + "documentation":"

Associate one or more DBProxyTarget data structures with a DBProxyTargetGroup.

" }, "RemoveFromGlobalCluster":{ "name":"RemoveFromGlobalCluster", @@ -1994,7 +2000,9 @@ "errors":[ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSnapshotNotFoundFault"}, - {"shape":"DBClusterNotFoundFault"} + {"shape":"DBClusterNotFoundFault"}, + {"shape":"DBProxyNotFoundFault"}, + {"shape":"DBProxyTargetGroupNotFoundFault"} ], "documentation":"

Removes metadata tags from an Amazon RDS resource.

For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" }, @@ -2059,7 +2067,7 @@ {"shape":"DomainNotFoundFault"}, {"shape":"InsufficientStorageClusterCapacityFault"} ], - "documentation":"

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using the Percona XtraBackup utility as described in Migrating Data to an Amazon Aurora MySQL DB Cluster in the Amazon Aurora User Guide.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromS3 action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, "RestoreDBClusterFromSnapshot":{ "name":"RestoreDBClusterFromSnapshot", @@ -2093,7 +2101,7 @@ {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster. If you don't specify a security group, the new DB cluster is associated with the default security group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot. This action only applies to Aurora DB clusters.

The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromSnapshot action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

This action only applies to Aurora DB clusters.

" }, "RestoreDBClusterToPointInTime":{ "name":"RestoreDBClusterToPointInTime", @@ -2425,7 +2433,7 @@ "documentation":"

The maximum allowed value for the quota.

" } }, - "documentation":"

Describes a quota for an AWS account.

The following are account quotas:

  • AllocatedStorage - The total allocated storage per account, in GiB. The used value is the total allocated storage in the account, in GiB.

  • AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB security group. The used value is the highest number of ingress rules in a DB security group in the account. Other DB security groups in the account might have a lower number of ingress rules.

  • CustomEndpointsPerDBCluster - The number of custom endpoints per DB cluster. The used value is the highest number of custom endpoints in a DB clusters in the account. Other DB clusters in the account might have a lower number of custom endpoints.

  • DBClusterParameterGroups - The number of DB cluster parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB cluster parameter groups in the account.

  • DBClusterRoles - The number of associated AWS Identity and Access Management (IAM) roles per DB cluster. The used value is the highest number of associated IAM roles for a DB cluster in the account. Other DB clusters in the account might have a lower number of associated IAM roles.

  • DBClusters - The number of DB clusters per account. The used value is the count of DB clusters in the account.

  • DBInstanceRoles - The number of associated IAM roles per DB instance. The used value is the highest number of associated IAM roles for a DB instance in the account. Other DB instances in the account might have a lower number of associated IAM roles.

  • DBInstances - The number of DB instances per account. The used value is the count of the DB instances in the account.

    Amazon RDS DB instances, Amazon Aurora DB instances, Amazon Neptune instances, and Amazon DocumentDB instances apply to this quota.

  • DBParameterGroups - The number of DB parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB parameter groups in the account.

  • DBSecurityGroups - The number of DB security groups (not VPC security groups) per account, excluding the default security group. The used value is the count of nondefault DB security groups in the account.

  • DBSubnetGroups - The number of DB subnet groups per account. The used value is the count of the DB subnet groups in the account.

  • EventSubscriptions - The number of event subscriptions per account. The used value is the count of the event subscriptions in the account.

  • ManualSnapshots - The number of manual DB snapshots per account. The used value is the count of the manual DB snapshots in the account.

  • OptionGroups - The number of DB option groups per account, excluding default option groups. The used value is the count of nondefault DB option groups in the account.

  • ReadReplicasPerMaster - The number of Read Replicas per DB instance. The used value is the highest number of Read Replicas for a DB instance in the account. Other DB instances in the account might have a lower number of Read Replicas.

  • ReservedDBInstances - The number of reserved DB instances per account. The used value is the count of the active reserved DB instances in the account.

  • SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. The used value is highest number of subnets for a DB subnet group in the account. Other DB subnet groups in the account might have a lower number of subnets.

For more information, see Quotas for Amazon RDS in the Amazon RDS User Guide and Quotas for Amazon Aurora in the Amazon Aurora User Guide.

", + "documentation":"

Describes a quota for an AWS account.

The following are account quotas:

  • AllocatedStorage - The total allocated storage per account, in GiB. The used value is the total allocated storage in the account, in GiB.

  • AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB security group. The used value is the highest number of ingress rules in a DB security group in the account. Other DB security groups in the account might have a lower number of ingress rules.

  • CustomEndpointsPerDBCluster - The number of custom endpoints per DB cluster. The used value is the highest number of custom endpoints in a DB cluster in the account. Other DB clusters in the account might have a lower number of custom endpoints.

  • DBClusterParameterGroups - The number of DB cluster parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB cluster parameter groups in the account.

  • DBClusterRoles - The number of associated AWS Identity and Access Management (IAM) roles per DB cluster. The used value is the highest number of associated IAM roles for a DB cluster in the account. Other DB clusters in the account might have a lower number of associated IAM roles.

  • DBClusters - The number of DB clusters per account. The used value is the count of DB clusters in the account.

  • DBInstanceRoles - The number of associated IAM roles per DB instance. The used value is the highest number of associated IAM roles for a DB instance in the account. Other DB instances in the account might have a lower number of associated IAM roles.

  • DBInstances - The number of DB instances per account. The used value is the count of the DB instances in the account.

    Amazon RDS DB instances, Amazon Aurora DB instances, Amazon Neptune instances, and Amazon DocumentDB instances apply to this quota.

  • DBParameterGroups - The number of DB parameter groups per account, excluding default parameter groups. The used value is the count of nondefault DB parameter groups in the account.

  • DBSecurityGroups - The number of DB security groups (not VPC security groups) per account, excluding the default security group. The used value is the count of nondefault DB security groups in the account.

  • DBSubnetGroups - The number of DB subnet groups per account. The used value is the count of the DB subnet groups in the account.

  • EventSubscriptions - The number of event subscriptions per account. The used value is the count of the event subscriptions in the account.

  • ManualSnapshots - The number of manual DB snapshots per account. The used value is the count of the manual DB snapshots in the account.

  • OptionGroups - The number of DB option groups per account, excluding default option groups. The used value is the count of nondefault DB option groups in the account.

  • ReadReplicasPerMaster - The number of read replicas per DB instance. The used value is the highest number of read replicas for a DB instance in the account. Other DB instances in the account might have a lower number of read replicas.

  • ReservedDBInstances - The number of reserved DB instances per account. The used value is the count of the active reserved DB instances in the account.

  • SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. The used value is the highest number of subnets for a DB subnet group in the account. Other DB subnet groups in the account might have a lower number of subnets.

For more information, see Quotas for Amazon RDS in the Amazon RDS User Guide and Quotas for Amazon Aurora in the Amazon Aurora User Guide.

", "wrapper":true }, "AccountQuotaList":{ @@ -2877,7 +2885,7 @@ "documentation":"

One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

Default: no initialization query

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Specifies the settings that control the size and behavior of the connection pool associated with a DBProxyTargetGroup.

" + "documentation":"

Specifies the settings that control the size and behavior of the connection pool associated with a DBProxyTargetGroup.

" }, "ConnectionPoolConfigurationInfo":{ "type":"structure", @@ -2903,7 +2911,7 @@ "documentation":"

One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Displays the settings that control the size and behavior of the connection pool associated with a DBProxyTarget.

" + "documentation":"

Displays the settings that control the size and behavior of the connection pool associated with a DBProxyTarget.

" }, "CopyDBClusterParameterGroupMessage":{ "type":"structure", @@ -2955,7 +2963,7 @@ }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same AWS Region.

The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot is to be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same AWS Region.

The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the DB cluster snapshot is to be created in.

  • SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" }, "CopyTags":{ "shape":"BooleanOptional", @@ -3208,7 +3216,7 @@ }, "ReplicationSourceIdentifier":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.

" + "documentation":"

The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.

" }, "Tags":{ "shape":"TagList", @@ -3220,11 +3228,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If an encryption key isn't specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the encryption key used to encrypt the source. Otherwise, Amazon RDS will use your default encryption key.

  • If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS will use your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a Read Replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the Read Replica in that AWS Region.

" + "documentation":"

The AWS KMS key identifier for an encrypted DB cluster.

The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.

If an encryption key isn't specified in KmsKeyId:

  • If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the encryption key used to encrypt the source. Otherwise, Amazon RDS will use your default encryption key.

  • If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS will use your default encryption key.

AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

If you create a read replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the read replica in that AWS Region.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source AWS Region where the DB cluster is replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster in the destination AWS Region. This should refer to the same KMS key for both the CreateDBCluster action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that Aurora Read Replica will be created in.

  • ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster from the us-west-2 AWS Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" + "documentation":"

A URL that contains a Signature Version 4 signed request for the CreateDBCluster action to be called in the source AWS Region where the DB cluster is replicated from. You only need to specify PreSignedUrl when you are performing cross-region replication from an encrypted DB cluster.

The pre-signed URL must be a valid request for the CreateDBCluster API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.

The pre-signed URL request must contain the following parameter values:

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster in the destination AWS Region. This should refer to the same KMS key for both the CreateDBCluster action that is called in the destination AWS Region, and the action contained in the pre-signed URL.

  • DestinationRegion - The name of the AWS Region that the Aurora read replica will be created in.

  • ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster from the us-west-2 AWS Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -3232,7 +3240,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Currently, Backtrack is only supported for Aurora MySQL DB clusters.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -3240,7 +3248,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide:

" }, "ScalingConfiguration":{ "shape":"ScalingConfiguration", @@ -3264,11 +3272,15 @@ }, "Domain":{ "shape":"String", - "documentation":"

The Active Directory directory ID to create the DB cluster in.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Using Kerberos Authentication for Aurora MySQL in the Amazon Aurora User Guide.

" + "documentation":"

The Active Directory directory ID to create the DB cluster in.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", "documentation":"

Specify the name of the IAM role to be used when making API calls to the Directory Service.

" + }, + "EnableGlobalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether to enable write operations to be forwarded from this cluster to the primary cluster in an Aurora global database. The resulting changes are replicated back to this cluster. This parameter only applies to DB clusters that are secondary clusters in an Aurora global database. By default, Aurora disallows write operations for secondary clusters.

" } }, "documentation":"

" @@ -3402,7 +3414,7 @@ }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster.

Default: 1

Constraints:

  • Must be a value from 0 to 35

  • Can't be set to 0 if the DB instance is a source to Read Replicas

" + "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster.

Default: 1

Constraints:

  • Must be a value from 0 to 35

  • Can't be set to 0 if the DB instance is a source to read replicas

" }, "PreferredBackupWindow":{ "shape":"String", @@ -3410,7 +3422,7 @@ }, "Port":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the database accepts connections.

MySQL

Default: 3306

Valid Values: 1150-65535

Type: Integer

MariaDB

Default: 3306

Valid Values: 1150-65535

Type: Integer

PostgreSQL

Default: 5432

Valid Values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid Values: 1150-65535

SQL Server

Default: 1433

Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

Amazon Aurora

Default: 3306

Valid Values: 1150-65535

Type: Integer

" + "documentation":"

The port number on which the database accepts connections.

MySQL

Default: 3306

Valid values: 1150-65535

Type: Integer

MariaDB

Default: 3306

Valid values: 1150-65535

Type: Integer

PostgreSQL

Default: 5432

Valid values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid values: 1150-65535

SQL Server

Default: 1433

Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

Amazon Aurora

Default: 3306

Valid values: 1150-65535

Type: Integer

" }, "MultiAZ":{ "shape":"BooleanOptional", @@ -3442,7 +3454,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target region doesn’t have an Internet gateway attached to it, the DB instance is private.

  • If the default VPC in the target region has an Internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an Internet gateway attached to it, the DB instance is private.

  • If the subnets are part of a VPC that has an Internet gateway attached to it, the DB instance is public.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

  • If the default VPC in the target region doesn’t have an Internet gateway attached to it, the DB instance is private.

  • If the default VPC in the target region has an Internet gateway attached to it, the DB instance is public.

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

  • If the subnets are part of a VPC that doesn’t have an Internet gateway attached to it, the DB instance is private.

  • If the subnets are part of a VPC that has an Internet gateway attached to it, the DB instance is public.

" }, "Tags":{ "shape":"TagList", @@ -3474,7 +3486,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

The Active Directory directory ID to create the DB instance in. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instance, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

The Active Directory directory ID to create the DB instance in. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -3544,19 +3556,19 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier of the Read Replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

" + "documentation":"

The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.

" }, "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB instance that will act as the source for the Read Replica. Each DB instance can have up to five Read Replicas.

Constraints:

  • Must be the identifier of an existing MySQL, MariaDB, Oracle, or PostgreSQL DB instance.

  • Can specify a DB instance that is a MySQL Read Replica only if the source is running MySQL 5.6 or later.

  • For the limitations of Oracle Read Replicas, see Read Replica Limitations with Oracle in the Amazon RDS User Guide.

  • Can specify a DB instance that is a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region replication).

  • The specified DB instance must have automatic backups enabled, its backup retention period must be greater than 0.

  • If the source DB instance is in the same AWS Region as the Read Replica, specify a valid DB instance identifier.

  • If the source DB instance is in a different AWS Region than the Read Replica, specify a valid DB instance ARN. For more information, go to Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

" + "documentation":"

The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to five read replicas.

Constraints:

  • Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance.

  • Can specify a DB instance that is a MySQL read replica only if the source is running MySQL 5.6 or later.

  • For the limitations of Oracle read replicas, see Read Replica Limitations with Oracle in the Amazon RDS User Guide.

  • For the limitations of SQL Server read replicas, see Read Replica Limitations with Microsoft SQL Server in the Amazon RDS User Guide.

  • Can specify a PostgreSQL DB instance only if the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region replication).

  • The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.

  • If the source DB instance is in the same AWS Region as the read replica, specify a valid DB instance identifier.

  • If the source DB instance is in a different AWS Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server, which doesn't support cross-region replicas.

" }, "DBInstanceClass":{ "shape":"String", - "documentation":"

The compute and memory capacity of the Read Replica, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: Inherits from the source DB instance.

" + "documentation":"

The compute and memory capacity of the read replica, for example, db.m4.large. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Default: Inherits from the source DB instance.

" }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone (AZ) where the Read Replica will be created.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

" + "documentation":"

The Availability Zone (AZ) where the read replica will be created.

Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.

Example: us-east-1d

" }, "Port":{ "shape":"IntegerOptional", @@ -3564,11 +3576,11 @@ }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the Read Replica is in a Multi-AZ deployment.

You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.

" + "documentation":"

A value that indicates whether the read replica is in a Multi-AZ deployment.

You can create a read replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your read replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether minor engine upgrades are applied automatically to the Read Replica during the maintenance window.

Default: Inherits from the source DB instance

" + "documentation":"

A value that indicates whether minor engine upgrades are applied automatically to the read replica during the maintenance window.

Default: Inherits from the source DB instance

" }, "Iops":{ "shape":"IntegerOptional", @@ -3576,36 +3588,36 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"

The option group the DB instance is associated with. If omitted, the option group associated with the source instance is used.

" + "documentation":"

The option group the DB instance is associated with. If omitted, the option group associated with the source instance is used.

For SQL Server, you must use the option group associated with the source instance.

" }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same region Read Replica, or the default DBParameterGroup for the specified DB engine for a cross region Read Replica.

Currently, specifying a parameter group for this operation is only supported for Oracle DB instances.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

" + "documentation":"

The name of the DB parameter group to associate with this DB instance.

If you do not specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of source DB instance for a same region read replica, or the default DBParameterGroup for the specified DB engine for a cross region read replica.

Currently, specifying a parameter group for this operation is only supported for Oracle DB instances.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "Tags":{"shape":"TagList"}, "DBSubnetGroupName":{ "shape":"String", - "documentation":"

Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.

Constraints:

  • Can only be specified if the source DB instance identifier specifies a DB instance in another AWS Region.

  • If supplied, must match the name of an existing DBSubnetGroup.

  • The specified DB subnet group must be in the same AWS Region in which the operation is running.

  • All Read Replicas in one AWS Region that are created from the same source DB instance must either:>

    • Specify DB subnet groups from the same VPC. All these Read Replicas are created in the same VPC.

    • Not specify a DB subnet group. All these Read Replicas are created outside of any VPC.

Example: mySubnetgroup

" + "documentation":"

Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.

Constraints:

  • Can only be specified if the source DB instance identifier specifies a DB instance in another AWS Region.

  • If supplied, must match the name of an existing DBSubnetGroup.

  • The specified DB subnet group must be in the same AWS Region in which the operation is running.

  • All read replicas in one AWS Region that are created from the same source DB instance must either:

    • Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC.

    • Not specify a DB subnet group. All these read replicas are created outside of any VPC.

Example: mySubnetgroup

" }, "VpcSecurityGroupIds":{ "shape":"VpcSecurityGroupIdList", - "documentation":"

A list of EC2 VPC security groups to associate with the Read Replica.

Default: The default EC2 VPC security group for the DB subnet group's VPC.

" + "documentation":"

A list of EC2 VPC security groups to associate with the read replica.

Default: The default EC2 VPC security group for the DB subnet group's VPC.

" }, "StorageType":{ "shape":"String", - "documentation":"

Specifies the storage type to be associated with the Read Replica.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise gp2

" + "documentation":"

Specifies the storage type to be associated with the read replica.

Valid values: standard | gp2 | io1

If you specify io1, you must also include a value for the Iops parameter.

Default: io1 if the Iops parameter is specified, otherwise gp2

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to copy all tags from the Read Replica to snapshots of the Read Replica. By default, tags are not copied.

" + "documentation":"

A value that indicates whether to copy all tags from the read replica to snapshots of the read replica. By default, tags are not copied.

" }, "MonitoringInterval":{ "shape":"IntegerOptional", - "documentation":"

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the Read Replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

" + "documentation":"

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the read replica. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

" }, "MonitoringRoleArn":{ "shape":"String", @@ -3613,11 +3625,11 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you create an encrypted Read Replica in the same AWS Region as the source DB instance, then you do not have to specify a value for this parameter. The Read Replica is encrypted with the same KMS key as the source DB instance.

If you create an encrypted Read Replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

You can't create an encrypted Read Replica from an unencrypted DB instance.

" + "documentation":"

The AWS KMS key ID for an encrypted read replica. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you create an encrypted read replica in the same AWS Region as the source DB instance, then you do not have to specify a value for this parameter. The read replica is encrypted with the same KMS key as the source DB instance.

If you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

You can't create an encrypted read replica from an unencrypted DB instance.

" }, "PreSignedUrl":{ "shape":"String", - "documentation":"

The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the source AWS Region that contains the source DB instance.

You must specify this parameter when you create an encrypted Read Replica from another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted Read Replica in the same AWS Region.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source AWS Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:

  • DestinationRegion - The AWS Region that the encrypted Read Replica is created in. This AWS Region is the same one where the CreateDBInstanceReadReplica action is called that contains this presigned URL.

    For example, if you create an encrypted DB instance in the us-west-1 AWS Region, from a source DB instance in the us-east-2 AWS Region, then you call the CreateDBInstanceReadReplica action in the us-east-1 AWS Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica action in the us-west-2 AWS Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 AWS Region.

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the Read Replica in the destination AWS Region. This is the same identifier for both the CreateDBInstanceReadReplica action that is called in the destination AWS Region, and the action contained in the presigned URL.

  • SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are creating an encrypted Read Replica from a DB instance in the us-west-2 AWS Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

" + "documentation":"

The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action in the source AWS Region that contains the source DB instance.

You must specify this parameter when you create an encrypted read replica from another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same AWS Region.

The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action that can be executed in the source AWS Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values:

  • DestinationRegion - The AWS Region that the encrypted read replica is created in. This AWS Region is the same one where the CreateDBInstanceReadReplica action is called that contains this presigned URL.

    For example, if you create an encrypted DB instance in the us-west-1 AWS Region, from a source DB instance in the us-east-2 AWS Region, then you call the CreateDBInstanceReadReplica action in the us-west-1 AWS Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica action in the us-east-2 AWS Region. For this example, the DestinationRegion in the presigned URL must be set to the us-west-1 AWS Region.

  • KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the read replica in the destination AWS Region. This is the same identifier for both the CreateDBInstanceReadReplica action that is called in the destination AWS Region, and the action contained in the presigned URL.

  • SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 AWS Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can be executed in the source AWS Region.

SourceRegion isn't supported for SQL Server, because SQL Server on Amazon RDS doesn't support cross-region read replicas.

" }, "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", @@ -3625,7 +3637,7 @@ }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether to enable Performance Insights for the Read Replica.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

" + "documentation":"

A value that indicates whether to enable Performance Insights for the read replica.

For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide.

" }, "PerformanceInsightsKMSKeyId":{ "shape":"String", @@ -3653,7 +3665,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

The Active Directory directory ID to create the DB instance in.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

The Active Directory directory ID to create the DB instance in.

For Oracle DB instances, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -3722,7 +3734,7 @@ }, "EngineFamily":{ "shape":"EngineFamily", - "documentation":"

The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. Currently, this value is always MYSQL. The engine family applies to both RDS MySQL and Aurora MySQL.

" + "documentation":"

The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

" }, "Auth":{ "shape":"UserAuthConfigList", @@ -4147,11 +4159,11 @@ }, "ReplicationSourceIdentifier":{ "shape":"String", - "documentation":"

Contains the identifier of the source DB cluster if this DB cluster is a Read Replica.

" + "documentation":"

Contains the identifier of the source DB cluster if this DB cluster is a read replica.

" }, "ReadReplicaIdentifiers":{ "shape":"ReadReplicaIdentifierList", - "documentation":"

Contains one or more identifiers of the Read Replicas associated with this DB cluster.

" + "documentation":"

Contains one or more identifiers of the read replicas associated with this DB cluster.

" }, "DBClusterMembers":{ "shape":"DBClusterMemberList", @@ -4219,7 +4231,7 @@ }, "EngineMode":{ "shape":"String", - "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

" + "documentation":"

The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, global, or multimaster.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode. To check if a DB cluster is part of a global database, use DescribeGlobalClusters instead of checking the EngineMode return value from DescribeDBClusters.

" }, "ScalingConfigurationInfo":{"shape":"ScalingConfigurationInfo"}, "DeletionProtection":{ @@ -4257,6 +4269,14 @@ "DomainMemberships":{ "shape":"DomainMembershipList", "documentation":"

The Active Directory Domain membership records associated with the DB cluster.

" + }, + "GlobalWriteForwardingStatus":{ + "shape":"WriteForwardingStatus", + "documentation":"

Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

" + }, + "GlobalWriteForwardingRequested":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether you have requested to enable write forwarding for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster.

" } }, "documentation":"

Contains the details of an Amazon Aurora DB cluster.

This data type is used as a response element in the DescribeDBClusters, StopDBCluster, and StartDBCluster actions.

", @@ -4918,11 +4938,11 @@ }, "SupportsReadReplica":{ "shape":"Boolean", - "documentation":"

Indicates whether the database engine version supports Read Replicas.

" + "documentation":"

Indicates whether the database engine version supports read replicas.

" }, "SupportedEngineModes":{ "shape":"EngineModeList", - "documentation":"

A list of the supported DB engine modes.

" + "documentation":"

A list of the supported DB engine modes.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

" }, "SupportedFeatureNames":{ "shape":"FeatureNameList", @@ -4973,7 +4993,7 @@ }, "DBInstanceStatus":{ "shape":"String", - "documentation":"

Specifies the current state of this database.

" + "documentation":"

Specifies the current state of this database.

For information about DB instance statuses, see DB Instance Status in the Amazon RDS User Guide.

" }, "MasterUsername":{ "shape":"String", @@ -5049,15 +5069,15 @@ }, "ReadReplicaSourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

Contains the identifier of the source DB instance if this DB instance is a Read Replica.

" + "documentation":"

Contains the identifier of the source DB instance if this DB instance is a read replica.

" }, "ReadReplicaDBInstanceIdentifiers":{ "shape":"ReadReplicaDBInstanceIdentifierList", - "documentation":"

Contains one or more identifiers of the Read Replicas associated with this DB instance.

" + "documentation":"

Contains one or more identifiers of the read replicas associated with this DB instance.

" }, "ReadReplicaDBClusterIdentifiers":{ "shape":"ReadReplicaDBClusterIdentifierList", - "documentation":"

Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a Read Replica. For example, when you create an Aurora Read Replica of an RDS MySQL DB instance, the Aurora MySQL DB cluster for the Aurora Read Replica is shown. This output does not contain information about cross region Aurora Read Replicas.

Currently, each RDS DB instance can have only one Aurora Read Replica.

" + "documentation":"

Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output does not contain information about cross region Aurora read replicas.

Currently, each RDS DB instance can have only one Aurora read replica.

" }, "LicenseModel":{ "shape":"String", @@ -5081,11 +5101,11 @@ }, "PubliclyAccessible":{ "shape":"Boolean", - "documentation":"

Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal instance with a DNS name that resolves to a private IP address.

" + "documentation":"

Specifies the accessibility options for the DB instance.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "StatusInfos":{ "shape":"DBInstanceStatusInfoList", - "documentation":"

The status of a Read Replica. If the instance isn't a Read Replica, this is blank.

" + "documentation":"

The status of a read replica. If the instance isn't a read replica, this is blank.

" }, "StorageType":{ "shape":"String", @@ -5458,7 +5478,7 @@ }, "Status":{ "shape":"String", - "documentation":"

Status of the DB instance. For a StatusType of Read Replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated.

" + "documentation":"

Status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated.

" }, "Message":{ "shape":"String", @@ -5628,7 +5648,7 @@ }, "EngineFamily":{ "shape":"String", - "documentation":"

Currently, this value is always MYSQL. The engine family applies to both RDS MySQL and Aurora MySQL.

" + "documentation":"

The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

" }, "VpcSecurityGroupIds":{ "shape":"StringList", @@ -5671,7 +5691,7 @@ "documentation":"

The date and time when the proxy was last updated.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

The data structure representing a proxy managed by the RDS Proxy.

This data type is used as a response element in the DescribeDBProxies action.

" + "documentation":"

The data structure representing a proxy managed by the RDS Proxy.

This data type is used as a response element in the DescribeDBProxies action.

" }, "DBProxyAlreadyExistsFault":{ "type":"structure", @@ -5721,7 +5741,10 @@ "incompatible-network", "insufficient-resource-limits", "creating", - "deleting" + "deleting", + "suspended", + "suspending", + "reactivating" ] }, "DBProxyTarget":{ @@ -5750,9 +5773,13 @@ "Type":{ "shape":"TargetType", "documentation":"

Specifies the kind of database, such as an RDS DB instance or an Aurora DB cluster, that the target represents.

" + }, + "TargetHealth":{ + "shape":"TargetHealth", + "documentation":"

Information about the connection health of the RDS Proxy target.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Contains the details for an RDS Proxy target. It represents an RDS DB instance or Aurora DB cluster that the proxy can connect to. One or more targets are associated with an RDS Proxy target group.

This data type is used as a response element in the DescribeDBProxyTargets action.

" + "documentation":"

Contains the details for an RDS Proxy target. It represents an RDS DB instance or Aurora DB cluster that the proxy can connect to. One or more targets are associated with an RDS Proxy target group.

This data type is used as a response element in the DescribeDBProxyTargets action.

" }, "DBProxyTargetAlreadyRegisteredFault":{ "type":"structure", @@ -5802,7 +5829,7 @@ "documentation":"

The date and time when the target group was last updated.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Represents a set of RDS DB instances, Aurora DB clusters, or both that a proxy can connect to. Currently, each target group is associated with exactly one RDS DB instance or Aurora DB cluster.

This data type is used as a response element in the DescribeDBProxyTargetGroups action.

" + "documentation":"

Represents a set of RDS DB instances, Aurora DB clusters, or both that a proxy can connect to. Currently, each target group is associated with exactly one RDS DB instance or Aurora DB cluster.

This data type is used as a response element in the DescribeDBProxyTargetGroups action.

" }, "DBProxyTargetGroupNotFoundFault":{ "type":"structure", @@ -5973,7 +6000,7 @@ }, "SnapshotCreateTime":{ "shape":"TStamp", - "documentation":"

Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).

" + "documentation":"

Specifies when the snapshot was taken in Coordinated Universal Time (UTC).

" }, "Engine":{ "shape":"String", @@ -6001,7 +6028,7 @@ }, "InstanceCreateTime":{ "shape":"TStamp", - "documentation":"

Specifies the time when the snapshot was taken, in Universal Coordinated Time (UTC).

" + "documentation":"

Specifies the time in Coordinated Universal Time (UTC) when the DB instance, from which the snapshot was taken, was created.

" }, "MasterUsername":{ "shape":"String", @@ -6403,11 +6430,11 @@ }, "SkipFinalSnapshot":{ "shape":"Boolean", - "documentation":"

A value that indicates whether to skip the creation of a final DB snapshot before the DB instance is deleted. If skip is specified, no DB snapshot is created. If skip isn't specified, a DB snapshot is created before the DB instance is deleted. By default, skip isn't specified, and the DB snapshot is created.

When a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when skip is specified.

Specify skip when deleting a Read Replica.

The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified.

" + "documentation":"

A value that indicates whether to skip the creation of a final DB snapshot before the DB instance is deleted. If skip is specified, no DB snapshot is created. If skip isn't specified, a DB snapshot is created before the DB instance is deleted. By default, skip isn't specified, and the DB snapshot is created.

When a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when skip is specified.

Specify skip when deleting a read replica.

The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified.

" }, "FinalDBSnapshotIdentifier":{ "shape":"String", - "documentation":"

The DBSnapshotIdentifier of the new DBSnapshot created when the SkipFinalSnapshot parameter is disabled.

Specifying this parameter and also specifying to skip final DB snapshot creation in SkipFinalShapshot results in an error.

Constraints:

  • Must be 1 to 255 letters or numbers.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

  • Can't be specified when deleting a Read Replica.

" + "documentation":"

The DBSnapshotIdentifier of the new DBSnapshot created when the SkipFinalSnapshot parameter is disabled.

Specifying this parameter and also specifying to skip final DB snapshot creation in SkipFinalSnapshot results in an error.

Constraints:

  • Must be 1 to 255 letters or numbers.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

  • Can't be specified when deleting a read replica.

" }, "DeleteAutomatedBackups":{ "shape":"BooleanOptional", @@ -7513,6 +7540,10 @@ "shape":"String", "documentation":"

The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.

" }, + "AvailabilityZoneGroup":{ + "shape":"String", + "documentation":"

The Availability Zone group associated with a Local Zone. Specify this parameter to retrieve available offerings for the Local Zones in the group.

Omit this parameter to show the available offerings in the specified AWS Region.

" + }, "Vpc":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether to show only VPC or non-VPC offerings.

" @@ -7862,7 +7893,10 @@ }, "EngineFamily":{ "type":"string", - "enum":["MYSQL"] + "enum":[ + "MYSQL", + "POSTGRESQL" + ] }, "EngineModeList":{ "type":"list", @@ -8051,7 +8085,7 @@ }, "ExportOnly":{ "shape":"StringList", - "documentation":"

The data exported from the snapshot. Valid values are the following:

  • database - Export all the data of the snapshot.

  • database.table [table-name] - Export a table of the snapshot.

  • database.schema [schema-name] - Export a database schema of the snapshot. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

  • database.schema.table [table-name] - Export a table of the database schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

" + "documentation":"

The data exported from the snapshot. Valid values are the following:

  • database - Export all the data from a specified database.

  • database.table table-name - Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

  • database.schema schema-name - Export a database schema of the snapshot. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

  • database.schema.table table-name - Export a table of the database schema. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

" }, "SnapshotTime":{ "shape":"TStamp", @@ -8285,6 +8319,10 @@ "IsWriter":{ "shape":"Boolean", "documentation":"

Specifies whether the Aurora cluster is the primary cluster (that is, has read-write capability) for the Aurora global database with which it is associated.

" + }, + "GlobalWriteForwardingStatus":{ + "shape":"WriteForwardingStatus", + "documentation":"

Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

" } }, "documentation":"

A data structure with information about any primary and secondary clusters associated with an Aurora global database.

", @@ -9016,7 +9054,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Currently, Backtrack is only supported for Aurora MySQL DB clusters.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" }, "CloudwatchLogsExportConfiguration":{ "shape":"CloudwatchLogsExportConfiguration", @@ -9057,6 +9095,10 @@ "CopyTagsToSnapshot":{ "shape":"BooleanOptional", "documentation":"

A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.

" + }, + "EnableGlobalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

A value that indicates whether to enable write operations to be forwarded from this cluster to the primary cluster in an Aurora global database. The resulting changes are replicated back to this cluster. This parameter only applies to DB clusters that are secondary clusters in an Aurora global database. By default, Aurora disallows write operations for secondary clusters.

" } }, "documentation":"

" @@ -9098,7 +9140,7 @@ }, "AttributeName":{ "shape":"String", - "documentation":"

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

" + "documentation":"

The name of the DB cluster snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes API action.

" }, "ValuesToAdd":{ "shape":"AttributeValueList", @@ -9159,7 +9201,7 @@ }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

  • Must be a value from 0 to 35

  • Can be specified for a MySQL Read Replica only if the source is running MySQL 5.6 or later

  • Can be specified for a PostgreSQL Read Replica only if the source is running PostgreSQL 9.3.5

  • Can't be set to 0 if the DB instance is a source to Read Replicas

" + "documentation":"

The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Changing this parameter can result in an outage if you change from 0 to a non-zero value or from a non-zero value to 0. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible.

Amazon Aurora

Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.

Default: Uses existing setting

Constraints:

  • Must be a value from 0 to 35

  • Can be specified for a MySQL read replica only if the source is running MySQL 5.6 or later

  • Can be specified for a PostgreSQL read replica only if the source is running PostgreSQL 9.3.5

  • Can't be set to 0 if the DB instance is a source to read replicas

" }, "PreferredBackupWindow":{ "shape":"String", @@ -9191,7 +9233,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Default: Uses existing setting

" + "documentation":"

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

Default: Uses existing setting

" }, "OptionGroupName":{ "shape":"String", @@ -9203,7 +9245,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

Specifies the storage type to be associated with the DB instance.

If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.

Valid values: standard | gp2 | io1

Default: io1 if the Iops parameter is specified, otherwise gp2

" + "documentation":"

Specifies the storage type to be associated with the DB instance.

If you specify Provisioned IOPS (io1), you must also include a value for the Iops parameter.

If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance.

Valid values: standard | gp2 | io1

Default: io1 if the Iops parameter is specified, otherwise gp2

" }, "TdeCredentialArn":{ "shape":"String", @@ -9219,7 +9261,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -9231,11 +9273,11 @@ }, "DBPortNumber":{ "shape":"IntegerOptional", - "documentation":"

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

MySQL

Default: 3306

Valid Values: 1150-65535

MariaDB

Default: 3306

Valid Values: 1150-65535

PostgreSQL

Default: 5432

Valid Values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid Values: 1150-65535

SQL Server

Default: 1433

Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through 49156.

Amazon Aurora

Default: 3306

Valid Values: 1150-65535

" + "documentation":"

The port number on which the database accepts connections.

The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance.

Your database will restart when you change the DBPortNumber value regardless of the value of the ApplyImmediately parameter.

MySQL

Default: 3306

Valid values: 1150-65535

MariaDB

Default: 3306

Valid values: 1150-65535

PostgreSQL

Default: 5432

Valid values: 1150-65535

Type: Integer

Oracle

Default: 1521

Valid values: 1150-65535

SQL Server

Default: 1433

Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156.

Amazon Aurora

Default: 3306

Valid values: 1150-65535

" }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.

Changes to the PubliclyAccessible parameter are applied immediately regardless of the value of the ApplyImmediately parameter.

" }, "MonitoringRoleArn":{ "shape":"String", @@ -9410,7 +9452,7 @@ }, "AttributeName":{ "shape":"String", - "documentation":"

The name of the DB snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, set this value to restore.

" + "documentation":"

The name of the DB snapshot attribute to modify.

To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API action.

" }, "ValuesToAdd":{ "shape":"AttributeValueList", @@ -10018,6 +10060,10 @@ "shape":"String", "documentation":"

The license model for a DB instance.

" }, + "AvailabilityZoneGroup":{ + "shape":"String", + "documentation":"

The Availability Zone group for a DB instance.

" + }, "AvailabilityZones":{ "shape":"AvailabilityZoneList", "documentation":"

A list of Availability Zones for a DB instance.

" @@ -10028,7 +10074,7 @@ }, "ReadReplicaCapable":{ "shape":"Boolean", - "documentation":"

Indicates whether a DB instance can have a Read Replica.

" + "documentation":"

Indicates whether a DB instance can have a read replica.

" }, "Vpc":{ "shape":"Boolean", @@ -10088,15 +10134,19 @@ }, "SupportedEngineModes":{ "shape":"EngineModeList", - "documentation":"

A list of the supported DB engine modes.

" + "documentation":"

A list of the supported DB engine modes.

global engine mode only applies for global database clusters created with Aurora MySQL version 5.6.10a. For higher Aurora MySQL versions, the clusters in a global database use provisioned engine mode.

" }, "SupportsStorageAutoscaling":{ "shape":"BooleanOptional", - "documentation":"

Whether or not Amazon RDS can automatically scale storage for DB instances that use the specified instance class.

" + "documentation":"

Whether Amazon RDS can automatically scale storage for DB instances that use the specified DB instance class.

" }, "SupportsKerberosAuthentication":{ "shape":"BooleanOptional", "documentation":"

Whether a DB instance supports Kerberos Authentication.

" + }, + "OutpostCapable":{ + "shape":"Boolean", + "documentation":"

Whether a DB instance supports RDS on Outposts.

For more information about RDS on Outposts, see Amazon RDS on AWS Outposts in the Amazon RDS User Guide.

" } }, "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", @@ -10123,6 +10173,16 @@ }, "documentation":"

Contains the result of a successful invocation of the DescribeOrderableDBInstanceOptions action.

" }, + "Outpost":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Outpost.

" + } + }, + "documentation":"

A data type that represents an Outpost.

For more information about RDS on Outposts, see Amazon RDS on AWS Outposts in the Amazon RDS User Guide.

" + }, "Parameter":{ "type":"structure", "members":{ @@ -10354,7 +10414,7 @@ "members":{ "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier of the DB cluster Read Replica to promote. This parameter isn't case-sensitive.

Constraints:

  • Must match the identifier of an existing DBCluster Read Replica.

Example: my-cluster-replica1

" + "documentation":"

The identifier of the DB cluster read replica to promote. This parameter isn't case-sensitive.

Constraints:

  • Must match the identifier of an existing DB cluster read replica.

Example: my-cluster-replica1

" } }, "documentation":"

" @@ -10371,11 +10431,11 @@ "members":{ "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing Read Replica DB instance.

Example: mydbinstance

" + "documentation":"

The DB instance identifier. This value is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing read replica DB instance.

Example: mydbinstance

" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", - "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

  • Must be a value from 0 to 35.

  • Can't be set to 0 if the DB instance is a source to Read Replicas.

" + "documentation":"

The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

Default: 1

Constraints:

  • Must be a value from 0 to 35.

  • Can't be set to 0 if the DB instance is a source to read replicas.

" }, "PreferredBackupWindow":{ "shape":"String", @@ -10960,7 +11020,7 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The name of the DB cluster to create from the source data in the Amazon S3 bucket. This parameter is isn't case-sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1

" + "documentation":"

The name of the DB cluster to create from the source data in the Amazon S3 bucket. This parameter isn't case-sensitive.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

Example: my-cluster1

" }, "DBClusterParameterGroupName":{ "shape":"String", @@ -11025,7 +11085,7 @@ }, "SourceEngineVersion":{ "shape":"String", - "documentation":"

The version of the database that the backup files were created from.

MySQL version 5.5 and 5.6 are supported.

Example: 5.6.22

" + "documentation":"

The version of the database that the backup files were created from.

MySQL versions 5.5, 5.6, and 5.7 are supported.

Example: 5.6.40

" }, "S3BucketName":{ "shape":"String", @@ -11041,7 +11101,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Currently, Backtrack is only supported for Aurora MySQL DB clusters.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -11057,7 +11117,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Using Kerberos Authentication for Aurora MySQL in the Amazon Aurora User Guide.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -11133,7 +11193,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Currently, Backtrack is only supported for Aurora MySQL DB clusters.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -11230,7 +11290,7 @@ }, "BacktrackWindow":{ "shape":"LongOptional", - "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" + "documentation":"

The target backtrack window, in seconds. To disable backtracking, set this value to 0.

Currently, Backtrack is only supported for Aurora MySQL DB clusters.

Default: 0

Constraints:

  • If specified, this value must be set to a number from 0 to 259,200 (72 hours).

" }, "EnableCloudwatchLogsExports":{ "shape":"LogTypeList", @@ -11250,7 +11310,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Using Kerberos Authentication for Aurora MySQL in the Amazon Aurora User Guide.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation.

For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -11302,7 +11362,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -11347,7 +11407,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "CopyTagsToSnapshot":{ "shape":"BooleanOptional", @@ -11492,7 +11552,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "Tags":{ "shape":"TagList", @@ -11532,7 +11592,7 @@ }, "SourceEngineVersion":{ "shape":"String", - "documentation":"

The engine version of your source database.

Valid Values: 5.6

" + "documentation":"

The version of the database that the backup files were created from.

MySQL versions 5.6 and 5.7 are supported.

Example: 5.6.40

" }, "S3BucketName":{ "shape":"String", @@ -11624,7 +11684,7 @@ }, "PubliclyAccessible":{ "shape":"BooleanOptional", - "documentation":"

A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

" + "documentation":"

A value that indicates whether the DB instance is publicly accessible.

When the DB instance is publicly accessible, its DNS endpoint resolves to the private IP address from within the DB instance's VPC, and to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses, and that public access is not permitted if the security group assigned to the DB instance doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -11673,7 +11733,7 @@ }, "Domain":{ "shape":"String", - "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

Specify the Active Directory directory ID to restore the DB instance in. The domain must be created prior to this operation. Currently, only Microsoft SQL Server and Oracle DB instances can be created in an Active Directory Domain.

For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication to authenticate users that connect to the DB instance. For more information, see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft SQL Server in the Amazon RDS User Guide.

For Oracle DB instances, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB instance. For more information, see Using Kerberos Authentication with Amazon RDS for Oracle in the Amazon RDS User Guide.

" }, "DomainIAMRoleName":{ "shape":"String", @@ -11928,7 +11988,7 @@ }, "SourceRegions":{ "shape":"SourceRegionList", - "documentation":"

A list of SourceRegion instances that contains each source AWS Region that the current AWS Region can get a Read Replica or a DB snapshot from.

" + "documentation":"

A list of SourceRegion instances that contains each source AWS Region that the current AWS Region can get a read replica or a DB snapshot from.

" } }, "documentation":"

Contains the result of a successful invocation of the DescribeSourceRegions action.

" @@ -12063,7 +12123,7 @@ }, "ExportOnly":{ "shape":"StringList", - "documentation":"

The data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are the following:

  • database - Export all the data of the snapshot.

  • database.table [table-name] - Export a table of the snapshot.

  • database.schema [schema-name] - Export a database schema of the snapshot. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

  • database.schema.table [table-name] - Export a table of the database schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL.

" + "documentation":"

The data to be exported from the snapshot. If this parameter is not provided, all the snapshot data is exported. Valid values are the following:

  • database - Export all the data from a specified database.

  • database.table table-name - Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL.

  • database.schema schema-name - Export a database schema of the snapshot. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

  • database.schema.table table-name - Export a table of the database schema. This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL.

" } } }, @@ -12172,15 +12232,19 @@ "members":{ "SubnetIdentifier":{ "shape":"String", - "documentation":"

Specifies the identifier of the subnet.

" + "documentation":"

The identifier of the subnet.

" }, "SubnetAvailabilityZone":{"shape":"AvailabilityZone"}, + "SubnetOutpost":{ + "shape":"Outpost", + "documentation":"

If the subnet is associated with an Outpost, this value specifies the Outpost.

For more information about RDS on Outposts, see Amazon RDS on AWS Outposts in the Amazon RDS User Guide.

" + }, "SubnetStatus":{ "shape":"String", - "documentation":"

Specifies the status of the subnet.

" + "documentation":"

The status of the subnet.

" } }, - "documentation":"

This data type is used as a response element in the DescribeDBSubnetGroups action.

" + "documentation":"

This data type is used as a response element for the DescribeDBSubnetGroups operation.

" }, "SubnetAlreadyInUse":{ "type":"structure", @@ -12295,10 +12359,45 @@ "type":"list", "member":{"shape":"DBProxyTargetGroup"} }, + "TargetHealth":{ + "type":"structure", + "members":{ + "State":{ + "shape":"TargetState", + "documentation":"

The current state of the connection health lifecycle for the RDS Proxy target. The following is a typical lifecycle example for the states of an RDS Proxy target:

registering > unavailable > available > unavailable > available

" + }, + "Reason":{ + "shape":"TargetHealthReason", + "documentation":"

The reason for the current health State of the RDS Proxy target.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description of the health of the RDS Proxy target. If the State is AVAILABLE, a description is not included.

" + } + }, + "documentation":"

Information about the connection health of an RDS Proxy target.

" + }, + "TargetHealthReason":{ + "type":"string", + "enum":[ + "UNREACHABLE", + "CONNECTION_FAILED", + "AUTH_FAILURE", + "PENDING_PROXY_CAPACITY" + ] + }, "TargetList":{ "type":"list", "member":{"shape":"DBProxyTarget"} }, + "TargetState":{ + "type":"string", + "enum":[ + "REGISTERING", + "AVAILABLE", + "UNAVAILABLE" + ] + }, "TargetType":{ "type":"string", "enum":[ @@ -12367,7 +12466,7 @@ "documentation":"

Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Specifies the details of authentication used by a proxy to log in as a specific database user.

" + "documentation":"

Specifies the details of authentication used by a proxy to log in as a specific database user.

" }, "UserAuthConfigInfo":{ "type":"structure", @@ -12393,7 +12492,7 @@ "documentation":"

Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy.

" } }, - "documentation":"

This is prerelease documentation for the RDS Database Proxy feature in preview release. It is subject to change.

Returns the details of authentication used by a proxy to log in as a specific database user.

" + "documentation":"

Returns the details of authentication used by a proxy to log in as a specific database user.

" }, "UserAuthConfigInfoList":{ "type":"list", @@ -12515,6 +12614,16 @@ } }, "documentation":"

Information about the virtual private network (VPN) between the VMware vSphere cluster and the AWS website.

For more information about RDS on VMware, see the RDS on VMware User Guide.

" + }, + "WriteForwardingStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled", + "enabling", + "disabling", + "unknown" + ] } }, "documentation":"Amazon Relational Database Service

Amazon Relational Database Service (Amazon RDS) is a web service that makes it easier to set up, operate, and scale a relational database in the cloud. It provides cost-efficient, resizeable capacity for an industry-standard relational database and manages common database administration tasks, freeing up developers to focus on what makes their applications and businesses unique.

Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities mean that the code, applications, and tools you already use today with your existing databases work with Amazon RDS without modification. Amazon RDS automatically backs up your database and maintains the database software that powers your DB instance. Amazon RDS is flexible: you can scale your DB instance's compute resources and storage capacity to meet your application's demand. As with all Amazon Web Services, there are no up-front investments, and you pay only for the resources you use.

This interface reference for Amazon RDS contains documentation for a programming or command line interface you can use to manage Amazon RDS. Amazon RDS is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. In this reference, the parameter descriptions indicate whether a command is applied immediately, on the next instance reboot, or during the maintenance window. The reference structure is as follows, and we list following some related topics from the user guide.

Amazon RDS API Reference

Amazon RDS User Guide

" diff --git a/services/rds/src/main/resources/software/amazon/awssdk/services/rds/execution.interceptors b/services/rds/src/main/resources/software/amazon/awssdk/services/rds/execution.interceptors index 04960dce76d2..332113d86951 100644 --- a/services/rds/src/main/resources/software/amazon/awssdk/services/rds/execution.interceptors +++ b/services/rds/src/main/resources/software/amazon/awssdk/services/rds/execution.interceptors @@ -1,2 +1,4 @@ +software.amazon.awssdk.services.rds.internal.CopyDbClusterSnapshotPresignInterceptor software.amazon.awssdk.services.rds.internal.CopyDbSnapshotPresignInterceptor -software.amazon.awssdk.services.rds.internal.CreateDbInstanceReadReplicaPresignInterceptor +software.amazon.awssdk.services.rds.internal.CreateDbClusterPresignInterceptor +software.amazon.awssdk.services.rds.internal.CreateDbInstanceReadReplicaPresignInterceptor \ No newline at end of file diff --git a/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestHandlerTest.java b/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestHandlerTest.java index cc62a7007749..e76dd522eacf 100644 --- a/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestHandlerTest.java +++ b/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestHandlerTest.java @@ -40,8 +40,6 @@ import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.rds.internal.CopyDbSnapshotPresignInterceptor; -import software.amazon.awssdk.services.rds.internal.RdsPresignInterceptor; import software.amazon.awssdk.services.rds.model.CopyDbSnapshotRequest; import software.amazon.awssdk.services.rds.model.RdsRequest; import software.amazon.awssdk.services.rds.transform.CopyDbSnapshotRequestMarshaller; @@ -163,7 +161,8 @@ private SdkHttpFullRequest 
marshallRequest(CopyDbSnapshotRequest request) { } private ExecutionAttributes executionAttributes() { - return new ExecutionAttributes().putAttribute(AwsSignerExecutionAttribute.AWS_CREDENTIALS, CREDENTIALS); + return new ExecutionAttributes().putAttribute(AwsSignerExecutionAttribute.AWS_CREDENTIALS, CREDENTIALS) + .putAttribute(AwsSignerExecutionAttribute.SIGNING_REGION, DESTINATION_REGION); } private CopyDbSnapshotRequest makeTestRequest() { diff --git a/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestWireMockTest.java b/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestWireMockTest.java new file mode 100644 index 000000000000..b8891324fa2a --- /dev/null +++ b/services/rds/src/test/java/software/amazon/awssdk/services/rds/internal/PresignRequestWireMockTest.java @@ -0,0 +1,101 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.rds.internal; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.any; +import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.findAll; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.github.tomakehurst.wiremock.verification.LoggedRequest; +import java.net.URI; +import java.util.List; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.rds.RdsClient; + +@RunWith(MockitoJUnitRunner.class) +public class PresignRequestWireMockTest { + @ClassRule + public static final WireMockRule WIRE_MOCK = new WireMockRule(0); + + public static RdsClient client; + + @BeforeClass + public static void setup() { + client = RdsClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + WIRE_MOCK.port())) + .build(); + } + + @Before + public void reset() { + WIRE_MOCK.resetAll(); + } + + @Test + public void copyDbClusterSnapshotWithSourceRegionSendsPresignedUrl() { + verifyMethodCallSendsPresignedUrl(() -> client.copyDBClusterSnapshot(r -> r.sourceRegion("us-west-2")), + 
"CopyDBClusterSnapshot"); + } + + @Test + public void copyDBSnapshotWithSourceRegionSendsPresignedUrl() { + verifyMethodCallSendsPresignedUrl(() -> client.copyDBSnapshot(r -> r.sourceRegion("us-west-2")), + "CopyDBSnapshot"); + } + + @Test + public void createDbClusterWithSourceRegionSendsPresignedUrl() { + verifyMethodCallSendsPresignedUrl(() -> client.createDBCluster(r -> r.sourceRegion("us-west-2")), + "CreateDBCluster"); + } + + @Test + public void createDBInstanceReadReplicaWithSourceRegionSendsPresignedUrl() { + verifyMethodCallSendsPresignedUrl(() -> client.createDBInstanceReadReplica(r -> r.sourceRegion("us-west-2")), + "CreateDBInstanceReadReplica"); + } + + public void verifyMethodCallSendsPresignedUrl(Runnable methodCall, String actionName) { + stubFor(any(anyUrl()).willReturn(aResponse().withStatus(200).withBody(""))); + + methodCall.run(); + + List requests = findAll(anyRequestedFor(anyUrl())); + + assertThat(requests).isNotEmpty(); + + LoggedRequest lastRequest = requests.get(0); + String lastRequestBody = new String(lastRequest.getBody(), UTF_8); + assertThat(lastRequestBody).contains("PreSignedUrl=https%3A%2F%2Frds.us-west-2.amazonaws.com%3FAction%3D" + actionName + + "%26Version%3D2014-10-31%26DestinationRegion%3Dus-east-1%26"); + } +} diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index b659222e2efb..e62996974b05 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 74f31b7f46cb..4a12c3667465 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/paginators-1.json 
b/services/redshift/src/main/resources/codegen-resources/paginators-1.json index e9690c816d8a..b72738fbece6 100644 --- a/services/redshift/src/main/resources/codegen-resources/paginators-1.json +++ b/services/redshift/src/main/resources/codegen-resources/paginators-1.json @@ -101,6 +101,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "ScheduledActions" + }, + "DescribeUsageLimits": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "UsageLimits" } } } \ No newline at end of file diff --git a/services/redshift/src/main/resources/codegen-resources/service-2.json b/services/redshift/src/main/resources/codegen-resources/service-2.json index e4340a85034b..c916838ebf92 100644 --- a/services/redshift/src/main/resources/codegen-resources/service-2.json +++ b/services/redshift/src/main/resources/codegen-resources/service-2.json @@ -389,7 +389,7 @@ {"shape":"TagLimitExceededFault"}, {"shape":"ScheduleDefinitionTypeUnsupportedFault"} ], - "documentation":"

Creates a snapshot schedule with the rate of every 12 hours.

" + "documentation":"

Create a snapshot schedule that can be associated to a cluster and which overrides the default system backup schedule.

" }, "CreateTags":{ "name":"CreateTags", @@ -405,6 +405,28 @@ ], "documentation":"

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

" }, + "CreateUsageLimit":{ + "name":"CreateUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUsageLimitMessage"}, + "output":{ + "shape":"UsageLimit", + "resultWrapper":"CreateUsageLimitResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"InvalidClusterStateFault"}, + {"shape":"LimitExceededFault"}, + {"shape":"UsageLimitAlreadyExistsFault"}, + {"shape":"InvalidUsageLimitFault"}, + {"shape":"TagLimitExceededFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

" + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -573,6 +595,19 @@ ], "documentation":"

Deletes tags from a resource. You must provide the ARN of the resource from which you want to delete the tag or tags.

" }, + "DeleteUsageLimit":{ + "name":"DeleteUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteUsageLimitMessage"}, + "errors":[ + {"shape":"UsageLimitNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

Deletes a usage limit from a cluster.

" + }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", "http":{ @@ -1018,6 +1053,23 @@ ], "documentation":"

Returns a list of tags. You can return tags from a specific resource by specifying an ARN, or you can return all tags for a given type of resource, such as clusters, snapshots, and so on.

The following are limitations for DescribeTags:

  • You cannot specify an ARN and a resource-type value together in the same request.

  • You cannot use the MaxRecords and Marker parameters together with the ARN parameter.

  • The MaxRecords parameter can be a range from 10 to 50 results to return in a request.

If you specify both tag keys and tag values in the same request, Amazon Redshift returns all resources that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all resources that have any combination of those values are returned.

If both tag keys and values are omitted from the request, resources are returned regardless of whether they have tag keys or values associated with them.

" }, + "DescribeUsageLimits":{ + "name":"DescribeUsageLimits", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeUsageLimitsMessage"}, + "output":{ + "shape":"UsageLimitList", + "resultWrapper":"DescribeUsageLimitsResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

Shows usage limits on a cluster. Results are filtered based on the combination of input usage limit identifier, cluster identifier, and feature type parameters:

  • If usage limit identifier, cluster identifier, and feature type are not provided, then all usage limit objects for the current account in the current region are returned.

  • If usage limit identifier is provided, then the corresponding usage limit object is returned.

  • If cluster identifier is provided, then all usage limit objects for the specified cluster are returned.

  • If cluster identifier and feature type are provided, then all usage limit objects for the combination of cluster and feature are returned.

" + }, "DisableLogging":{ "name":"DisableLogging", "http":{ @@ -1376,6 +1428,24 @@ ], "documentation":"

Modifies a snapshot schedule. Any schedule associated with a cluster is modified asynchronously.

" }, + "ModifyUsageLimit":{ + "name":"ModifyUsageLimit", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyUsageLimitMessage"}, + "output":{ + "shape":"UsageLimit", + "resultWrapper":"ModifyUsageLimitResult" + }, + "errors":[ + {"shape":"InvalidUsageLimitFault"}, + {"shape":"UsageLimitNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

Modifies a usage limit in a cluster. You can't modify the feature type or period of a usage limit.

" + }, "PauseCluster":{ "name":"PauseCluster", "http":{ @@ -1468,7 +1538,7 @@ {"shape":"UnauthorizedOperation"}, {"shape":"LimitExceededFault"} ], - "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

" + "documentation":"

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:

    • dc2.large

    • dc2.8xlarge

    • ds2.xlarge

    • ds2.8xlarge

    • ra3.4xlarge

    • ra3.16xlarge

  • The type of nodes that you add must match the node type for the cluster.

" }, "RestoreFromClusterSnapshot":{ "name":"RestoreFromClusterSnapshot", @@ -2865,7 +2935,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.16xlarge

" + "documentation":"

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge

" }, "MasterUsername":{ "shape":"String", @@ -3338,6 +3408,45 @@ }, "documentation":"

Contains the output from the CreateTags action.

" }, + "CreateUsageLimitMessage":{ + "type":"structure", + "required":[ + "ClusterIdentifier", + "FeatureType", + "LimitType", + "Amount" + ], + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the cluster that you want to limit usage.

" + }, + "FeatureType":{ + "shape":"UsageLimitFeatureType", + "documentation":"

The Amazon Redshift feature that you want to limit.

" + }, + "LimitType":{ + "shape":"UsageLimitLimitType", + "documentation":"

The type of limit. Depending on the feature type, this can be based on a time duration or data size. If FeatureType is spectrum, then LimitType must be data-scanned. If FeatureType is concurrency-scaling, then LimitType must be time.

" + }, + "Amount":{ + "shape":"Long", + "documentation":"

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB). The value must be a positive number.

" + }, + "Period":{ + "shape":"UsageLimitPeriod", + "documentation":"

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

" + }, + "BreachAction":{ + "shape":"UsageLimitBreachAction", + "documentation":"

The action that Amazon Redshift takes when the limit is reached. The default is log. For more information about this parameter, see UsageLimit.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag instances.

" + } + } + }, "CustomerStorageMessage":{ "type":"structure", "members":{ @@ -3604,6 +3713,16 @@ }, "documentation":"

Contains the output from the DeleteTags action.

" }, + "DeleteUsageLimitMessage":{ + "type":"structure", + "required":["UsageLimitId"], + "members":{ + "UsageLimitId":{ + "shape":"String", + "documentation":"

The identifier of the usage limit to delete.

" + } + } + }, "DependentServiceRequestThrottlingFault":{ "type":"structure", "members":{ @@ -4293,6 +4412,39 @@ }, "documentation":"

" }, + "DescribeUsageLimitsMessage":{ + "type":"structure", + "members":{ + "UsageLimitId":{ + "shape":"String", + "documentation":"

The identifier of the usage limit to describe.

" + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the cluster for which you want to describe usage limits.

" + }, + "FeatureType":{ + "shape":"UsageLimitFeatureType", + "documentation":"

The feature type for which you want to describe usage limits.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

Default: 100

Constraints: minimum 20, maximum 100.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeUsageLimits request exceed the value specified in MaxRecords, AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A tag key or keys for which you want to return all matching usage limit objects that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with the usage limit objects that have either or both of these tag keys associated with them.

" + }, + "TagValues":{ + "shape":"TagValueList", + "documentation":"

A tag value or values for which you want to return all matching usage limit objects that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with the usage limit objects that have either or both of these tag values associated with them.

" + } + } + }, "DisableLoggingMessage":{ "type":"structure", "required":["ClusterIdentifier"], @@ -5267,6 +5419,18 @@ }, "exception":true }, + "InvalidUsageLimitFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The usage limit is not valid.

", + "error":{ + "code":"InvalidUsageLimit", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidVPCNetworkStateFault":{ "type":"structure", "members":{ @@ -5446,7 +5610,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.16xlarge

" + "documentation":"

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge

" }, "NumberOfNodes":{ "shape":"IntegerOptional", @@ -5745,6 +5909,24 @@ } } }, + "ModifyUsageLimitMessage":{ + "type":"structure", + "required":["UsageLimitId"], + "members":{ + "UsageLimitId":{ + "shape":"String", + "documentation":"

The identifier of the usage limit to modify.

" + }, + "Amount":{ + "shape":"LongOptional", + "documentation":"

The new limit amount. For more information about this parameter, see UsageLimit.

" + }, + "BreachAction":{ + "shape":"UsageLimitBreachAction", + "documentation":"

The new action that Amazon Redshift takes when the limit is reached. For more information about this parameter, see UsageLimit.

" + } + } + }, "NodeConfigurationOption":{ "type":"structure", "members":{ @@ -6557,7 +6739,7 @@ }, "NodeType":{ "shape":"String", - "documentation":"

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlareg cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" + "documentation":"

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8xlarge cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" }, "EnhancedVpcRouting":{ "shape":"BooleanOptional", @@ -7931,6 +8113,115 @@ }, "documentation":"

A maintenance track that you can switch the current track to.

" }, + "UsageLimit":{ + "type":"structure", + "members":{ + "UsageLimitId":{ + "shape":"String", + "documentation":"

The identifier of the usage limit.

" + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the cluster with a usage limit.

" + }, + "FeatureType":{ + "shape":"UsageLimitFeatureType", + "documentation":"

The Amazon Redshift feature to which the limit applies.

" + }, + "LimitType":{ + "shape":"UsageLimitLimitType", + "documentation":"

The type of limit. Depending on the feature type, this can be based on a time duration or data size.

" + }, + "Amount":{ + "shape":"Long", + "documentation":"

The limit amount. If time-based, this amount is in minutes. If data-based, this amount is in terabytes (TB).

" + }, + "Period":{ + "shape":"UsageLimitPeriod", + "documentation":"

The time period that the amount applies to. A weekly period begins on Sunday. The default is monthly.

" + }, + "BreachAction":{ + "shape":"UsageLimitBreachAction", + "documentation":"

The action that Amazon Redshift takes when the limit is reached. Possible values are:

  • log - To log an event in a system table. The default is log.

  • emit-metric - To emit CloudWatch metrics.

  • disable - To disable the feature until the next usage period begins.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tag instances.

" + } + }, + "documentation":"

Describes a usage limit object for a cluster.

" + }, + "UsageLimitAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The usage limit already exists.

", + "error":{ + "code":"UsageLimitAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UsageLimitBreachAction":{ + "type":"string", + "enum":[ + "log", + "emit-metric", + "disable" + ] + }, + "UsageLimitFeatureType":{ + "type":"string", + "enum":[ + "spectrum", + "concurrency-scaling" + ] + }, + "UsageLimitLimitType":{ + "type":"string", + "enum":[ + "time", + "data-scanned" + ] + }, + "UsageLimitList":{ + "type":"structure", + "members":{ + "UsageLimits":{ + "shape":"UsageLimits", + "documentation":"

Contains the output from the DescribeUsageLimits action.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

" + } + } + }, + "UsageLimitNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The usage limit identifier can't be found.

", + "error":{ + "code":"UsageLimitNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "UsageLimitPeriod":{ + "type":"string", + "enum":[ + "daily", + "weekly", + "monthly" + ] + }, + "UsageLimits":{ + "type":"list", + "member":{"shape":"UsageLimit"} + }, "ValueStringList":{ "type":"list", "member":{ diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 7273676d7080..b42431757228 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json index 0cca435bb35f..b74b5cc55360 100644 --- a/services/rekognition/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rekognition/src/main/resources/codegen-resources/paginators-1.json @@ -42,6 +42,11 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "GetSegmentDetection": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "GetTextDetection": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 0ed7861c2ead..92681912136a 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -161,7 +161,7 @@ {"shape":"ThrottlingException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all versions of the model associated with the project. To delete a version of a model, see DeleteProjectVersion.

This operation requires permissions to perform the rekognition:DeleteProject action.

" + "documentation":"

Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated with the project. To delete a model, see DeleteProjectVersion.

This operation requires permissions to perform the rekognition:DeleteProject action.

" }, "DeleteProjectVersion":{ "name":"DeleteProjectVersion", @@ -180,7 +180,7 @@ {"shape":"ThrottlingException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

Deletes a version of a model.

You must first stop the model before you can delete it. To check if a model is running, use the Status field returned from DescribeProjectVersions. To stop a running model call StopProjectVersion.

This operation requires permissions to perform the rekognition:DeleteProjectVersion action.

" + "documentation":"

Deletes an Amazon Rekognition Custom Labels model.

You can't delete a model if it is running or if it is training. To check the status of a model, use the Status field returned from DescribeProjectVersions. To stop a running model call StopProjectVersion. If the model is training, wait until it finishes.

This operation requires permissions to perform the rekognition:DeleteProjectVersion action.

" }, "DeleteStreamProcessor":{ "name":"DeleteStreamProcessor", @@ -510,6 +510,25 @@ ], "documentation":"

Gets the path tracking results of an Amazon Rekognition Video analysis started by StartPersonTracking.

The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

" }, + "GetSegmentDetection":{ + "name":"GetSegmentDetection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSegmentDetectionRequest"}, + "output":{"shape":"GetSegmentDetectionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerError"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidPaginationTokenException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Gets the segment detection results of an Amazon Rekognition Video analysis started by StartSegmentDetection.

Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling StartSegmentDetection which returns a job identifier (JobId). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartSegmentDetection. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call of StartSegmentDetection.

GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection objects. Segments is sorted by the segment types specified in the SegmentTypes input parameter of StartSegmentDetection. Each element of the array includes the detected segment, the percentage confidence in the accuracy of the detected segment, the type of the segment, and the frame in which the segment was detected.

Use SelectedSegmentTypes to find out the type of segment detection requested in the call to StartSegmentDetection.

Use the MaxResults parameter to limit the number of segment detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetSegmentDetection and populate the NextToken request parameter with the token value returned from the previous call to GetSegmentDetection.

For more information, see Detecting Video Segments in Stored Video in the Amazon Rekognition Developer Guide.

" + }, "GetTextDetection":{ "name":"GetTextDetection", "http":{ @@ -548,7 +567,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call DescribeCollection.

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.

If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.

To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageID for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. By default, IndexFaces chooses the quality bar that's used to filter faces. You can also explicitly choose the quality bar. Use QualityFilter, to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

  • The face doesn’t have enough detail to be suitable for face search.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, FaceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" + "documentation":"

Detects faces in the input image and adds them to the specified collection.

Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations.

For more information, see Adding Faces to a Collection in the Amazon Rekognition Developer Guide.

To get the number of faces in a collection, call DescribeCollection.

If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image.

If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field.

To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces

For more information, see Model Versioning in the Amazon Rekognition Developer Guide.

If you provide the optional ExternalImageId for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image.

You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. By default, IndexFaces chooses the quality bar that's used to filter faces. You can also explicitly choose the quality bar. Use QualityFilter, to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE.

To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as:

  • The number of faces detected exceeds the value of the MaxFaces request parameter.

  • The face is too small compared to the image dimensions.

  • The face is too blurry.

  • The image is too dark.

  • The face has an extreme pose.

  • The face doesn’t have enough detail to be suitable for face search.

In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes:

  • The bounding box, BoundingBox, of the detected face.

  • A confidence value, Confidence, which indicates the confidence that the bounding box contains a face.

  • A face ID, FaceId, assigned by the service for each face that's detected and stored.

  • An image ID, ImageId, assigned by the service for the input image.

If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

This operation requires permissions to perform the rekognition:IndexFaces action.

" }, "ListCollections":{ "name":"ListCollections", @@ -819,6 +838,28 @@ ], "documentation":"

Starts the running of the version of a model. Starting a model takes a while to complete. To check the current state of the model, use DescribeProjectVersions.

Once the model is running, you can detect custom labels in new images by calling DetectCustomLabels.

You are charged for the amount of time that the model is running. To stop a running model, call StopProjectVersion.

This operation requires permissions to perform the rekognition:StartProjectVersion action.

" }, + "StartSegmentDetection":{ + "name":"StartSegmentDetection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSegmentDetectionRequest"}, + "output":{"shape":"StartSegmentDetectionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"IdempotentParameterMismatchException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidS3ObjectException"}, + {"shape":"InternalServerError"}, + {"shape":"VideoTooLargeException"}, + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Starts asynchronous detection of segments in a stored video.

Amazon Rekognition Video can detect segments in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartSegmentDetection returns a job identifier (JobId) which you use to get the results of the operation. When segment detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

You can use the Filters (StartSegmentDetectionFilters) input parameter to specify the minimum detection confidence returned in the response. Within Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots. Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical cues.

To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call to StartSegmentDetection.

For more information, see Detecting Video Segments in Stored Video in the Amazon Rekognition Developer Guide.

", + "idempotent":true + }, "StartStreamProcessor":{ "name":"StartStreamProcessor", "http":{ @@ -943,6 +984,32 @@ "type":"list", "member":{"shape":"Attribute"} }, + "AudioMetadata":{ + "type":"structure", + "members":{ + "Codec":{ + "shape":"String", + "documentation":"

The audio codec used to encode or decode the audio stream.

" + }, + "DurationMillis":{ + "shape":"ULong", + "documentation":"

The duration of the audio stream in milliseconds.

" + }, + "SampleRate":{ + "shape":"ULong", + "documentation":"

The sample rate for the audio stream.

" + }, + "NumberOfChannels":{ + "shape":"ULong", + "documentation":"

The number of audio channels in the segment.

" + } + }, + "documentation":"

Metadata information about an audio stream. An array of AudioMetadata objects for the audio streams found in a stored video is returned by GetSegmentDetection.

" + }, + "AudioMetadataList":{ + "type":"list", + "member":{"shape":"AudioMetadata"} + }, "Beard":{ "type":"structure", "members":{ @@ -1539,7 +1606,7 @@ }, "VersionNames":{ "shape":"VersionNames", - "documentation":"

A list of model version names that you want to describe. You can add up to 10 model version names to the list. If you don't specify a value, all model descriptions are returned.

" + "documentation":"

A list of model version names that you want to describe. You can add up to 10 model version names to the list. If you don't specify a value, all model descriptions are returned. A version name is part of a model (ProjectVersion) ARN. For example, my-model.2020-01-21T09.10.15 is the version name in the following ARN. arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123.

" }, "NextToken":{ "shape":"ExtendedPaginationToken", @@ -2458,13 +2525,64 @@ } } }, + "GetSegmentDetectionRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

Job identifier for the segment detection operation for which you want results returned. You get the job identifier from an initial call to StartSegmentDetection.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Maximum number of results to return per paginated call. The largest value you can specify is 1000.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the response is truncated, Amazon Rekognition Video returns this token that you can use in the subsequent request to retrieve the next set of segments.

" + } + } + }, + "GetSegmentDetectionResponse":{ + "type":"structure", + "members":{ + "JobStatus":{ + "shape":"VideoJobStatus", + "documentation":"

Current status of the segment detection job.

" + }, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

If the job fails, StatusMessage provides a descriptive error message.

" + }, + "VideoMetadata":{ + "shape":"VideoMetadataList", + "documentation":"

Currently, Amazon Rekognition Video returns a single object in the VideoMetadata array. The object contains information about the video stream in the input file that Amazon Rekognition Video chose to analyze. The VideoMetadata object includes the video codec, video format and other information. Video metadata is returned in each page of information returned by GetSegmentDetection.

" + }, + "AudioMetadata":{ + "shape":"AudioMetadataList", + "documentation":"

An array of objects. There can be multiple audio streams. Each AudioMetadata object contains metadata for a single audio stream. Audio information in an AudioMetadata objects includes the audio codec, the number of audio channels, the duration of the audio stream, and the sample rate. Audio metadata is returned in each page of information returned by GetSegmentDetection.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the previous response was incomplete (because there are more segments to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of segments.

" + }, + "Segments":{ + "shape":"SegmentDetections", + "documentation":"

An array of segments detected in a video.

" + }, + "SelectedSegmentTypes":{ + "shape":"SegmentTypesInfo", + "documentation":"

An array containing the segment types requested in the call to StartSegmentDetection.

" + } + } + }, "GetTextDetectionRequest":{ "type":"structure", "required":["JobId"], "members":{ "JobId":{ "shape":"JobId", - "documentation":"

Job identifier for the label detection operation for which you want results returned. You get the job identifer from an initial call to StartTextDetection.

" + "documentation":"

Job identifier for the text detection operation for which you want results returned. You get the job identifier from an initial call to StartTextDetection.

" }, "MaxResults":{ "shape":"MaxResults", @@ -2555,7 +2673,7 @@ }, "FlowDefinitionArn":{ "shape":"FlowDefinitionArn", - "documentation":"

The Amazon Resource Name (ARN) of the flow definition.

" + "documentation":"

The Amazon Resource Name (ARN) of the flow definition. You can create a flow definition by using the Amazon SageMaker CreateFlowDefinition Operation.

" }, "DataAttributes":{ "shape":"HumanLoopDataAttributes", @@ -2583,9 +2701,18 @@ "HumanLoopQuotaExceededException":{ "type":"structure", "members":{ - "ResourceType":{"shape":"String"}, - "QuotaCode":{"shape":"String"}, - "ServiceCode":{"shape":"String"} + "ResourceType":{ + "shape":"String", + "documentation":"

The resource type.

" + }, + "QuotaCode":{ + "shape":"String", + "documentation":"

The quota code.

" + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

The service code.

" + } }, "documentation":"

The number of in-progress human reviews you have has exceeded the number allowed.

", "exception":true @@ -3450,7 +3577,7 @@ "type":"structure", "members":{ }, - "documentation":"

", + "documentation":"

The specified resource is already being used.

", "exception":true }, "ResourceNotFoundException":{ @@ -3605,6 +3732,101 @@ } } }, + "SegmentConfidence":{ + "type":"float", + "max":100, + "min":50 + }, + "SegmentDetection":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"SegmentType", + "documentation":"

The type of the segment. Valid values are TECHNICAL_CUE and SHOT.

" + }, + "StartTimestampMillis":{ + "shape":"Timestamp", + "documentation":"

The start time of the detected segment in milliseconds from the start of the video.

" + }, + "EndTimestampMillis":{ + "shape":"Timestamp", + "documentation":"

The end time of the detected segment, in milliseconds, from the start of the video.

" + }, + "DurationMillis":{ + "shape":"ULong", + "documentation":"

The duration of the detected segment in milliseconds.

" + }, + "StartTimecodeSMPTE":{ + "shape":"Timecode", + "documentation":"

The frame-accurate SMPTE timecode, from the start of a video, for the start of a detected segment. StartTimecode is in HH:MM:SS:fr format (and ;fr for drop frame-rates).

" + }, + "EndTimecodeSMPTE":{ + "shape":"Timecode", + "documentation":"

The frame-accurate SMPTE timecode, from the start of a video, for the end of a detected segment. EndTimecode is in HH:MM:SS:fr format (and ;fr for drop frame-rates).

" + }, + "DurationSMPTE":{ + "shape":"Timecode", + "documentation":"

The duration of the timecode for the detected segment in SMPTE format.

" + }, + "TechnicalCueSegment":{ + "shape":"TechnicalCueSegment", + "documentation":"

If the segment is a technical cue, contains information about the technical cue.

" + }, + "ShotSegment":{ + "shape":"ShotSegment", + "documentation":"

If the segment is a shot detection, contains information about the shot detection.

" + } + }, + "documentation":"

A technical cue or shot detection segment detected in a video. An array of SegmentDetection objects containing all segments detected in a stored video is returned by GetSegmentDetection.

" + }, + "SegmentDetections":{ + "type":"list", + "member":{"shape":"SegmentDetection"} + }, + "SegmentType":{ + "type":"string", + "enum":[ + "TECHNICAL_CUE", + "SHOT" + ] + }, + "SegmentTypeInfo":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"SegmentType", + "documentation":"

The type of a segment (technical cue or shot detection).

" + }, + "ModelVersion":{ + "shape":"String", + "documentation":"

The version of the model used to detect segments.

" + } + }, + "documentation":"

Information about the type of a segment requested in a call to StartSegmentDetection. An array of SegmentTypeInfo objects is returned by the response from GetSegmentDetection.

" + }, + "SegmentTypes":{ + "type":"list", + "member":{"shape":"SegmentType"}, + "min":1 + }, + "SegmentTypesInfo":{ + "type":"list", + "member":{"shape":"SegmentTypeInfo"} + }, + "ShotSegment":{ + "type":"structure", + "members":{ + "Index":{ + "shape":"ULong", + "documentation":"

An identifier for a shot detection segment detected in a video.

" + }, + "Confidence":{ + "shape":"SegmentConfidence", + "documentation":"

The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.

" + } + }, + "documentation":"

Information about a shot detection segment detected in a video. For more information, see SegmentDetection.

" + }, "Smile":{ "type":"structure", "members":{ @@ -3854,6 +4076,69 @@ } } }, + "StartSegmentDetectionFilters":{ + "type":"structure", + "members":{ + "TechnicalCueFilter":{ + "shape":"StartTechnicalCueDetectionFilter", + "documentation":"

Filters that are specific to technical cues.

" + }, + "ShotFilter":{ + "shape":"StartShotDetectionFilter", + "documentation":"

Filters that are specific to shot detections.

" + } + }, + "documentation":"

Filters applied to the technical cue or shot detection segments. For more information, see StartSegmentDetection.

" + }, + "StartSegmentDetectionRequest":{ + "type":"structure", + "required":[ + "Video", + "SegmentTypes" + ], + "members":{ + "Video":{"shape":"Video"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

Idempotent token used to identify the start request. If you use the same token with multiple StartSegmentDetection requests, the same JobId is returned. Use ClientRequestToken to prevent the same job from being accidentally started more than once.

" + }, + "NotificationChannel":{ + "shape":"NotificationChannel", + "documentation":"

The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the segment detection operation.

" + }, + "JobTag":{ + "shape":"JobTag", + "documentation":"

An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic. For example, you can use JobTag to group related jobs and identify them in the completion notification.

" + }, + "Filters":{ + "shape":"StartSegmentDetectionFilters", + "documentation":"

Filters for technical cue or shot detection.

" + }, + "SegmentTypes":{ + "shape":"SegmentTypes", + "documentation":"

An array of segment types to detect in the video. Valid values are TECHNICAL_CUE and SHOT.

" + } + } + }, + "StartSegmentDetectionResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

Unique identifier for the segment detection job. The JobId is returned from StartSegmentDetection.

" + } + } + }, + "StartShotDetectionFilter":{ + "type":"structure", + "members":{ + "MinSegmentConfidence":{ + "shape":"SegmentConfidence", + "documentation":"

Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.

If you don't specify MinSegmentConfidence, the GetSegmentDetection returns segments with confidence values greater than or equal to 50 percent.

" + } + }, + "documentation":"

Filters for the shot detection segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.

" + }, "StartStreamProcessorRequest":{ "type":"structure", "required":["Name"], @@ -3869,6 +4154,16 @@ "members":{ } }, + "StartTechnicalCueDetectionFilter":{ + "type":"structure", + "members":{ + "MinSegmentConfidence":{ + "shape":"SegmentConfidence", + "documentation":"

Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.

If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments with confidence values greater than or equal to 50 percent.

" + } + }, + "documentation":"

Filters for the technical segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.

" + }, "StartTextDetectionFilters":{ "type":"structure", "members":{ @@ -4037,6 +4332,28 @@ }, "documentation":"

Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.

" }, + "TechnicalCueSegment":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"TechnicalCueType", + "documentation":"

The type of the technical cue.

" + }, + "Confidence":{ + "shape":"SegmentConfidence", + "documentation":"

The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.

" + } + }, + "documentation":"

Information about a technical cue segment. For more information, see SegmentDetection.

" + }, + "TechnicalCueType":{ + "type":"string", + "enum":[ + "ColorBars", + "EndCredits", + "BlackFrames" + ] + }, "TestingData":{ "type":"structure", "members":{ @@ -4132,6 +4449,7 @@ "exception":true, "fault":true }, + "Timecode":{"type":"string"}, "Timestamp":{"type":"long"}, "TrainingData":{ "type":"structure", @@ -4248,6 +4566,10 @@ }, "documentation":"

Information about a video that Amazon Rekognition analyzed. Videometadata is returned in every page of paginated responses from a Amazon Rekognition video operation.

" }, + "VideoMetadataList":{ + "type":"list", + "member":{"shape":"VideoMetadata"} + }, "VideoTooLargeException":{ "type":"structure", "members":{ diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index aeaa709814ed..76c41110b076 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index a48611b81fd4..6635ae2f91b4 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/service-2.json b/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/service-2.json index d405b011fb80..58d5b3cbe558 100644 --- a/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/service-2.json +++ b/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/service-2.json @@ -555,10 +555,10 @@ }, "Value":{ "shape":"TagValue", - "documentation":"

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

" + "documentation":"

One part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key). The value can be empty or null.

" } }, - "documentation":"

The metadata that you apply to AWS resources to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. For more information, see Tagging AWS Resources in the AWS General Reference.

" + "documentation":"

The metadata that you apply to AWS resources to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. For more information, see Tagging AWS Resources in the AWS General Reference.

" }, "TagFilter":{ "type":"structure", @@ -569,7 +569,7 @@ }, "Values":{ "shape":"TagValueList", - "documentation":"

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

" + "documentation":"

One part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key). The value can be empty or null.

" } }, "documentation":"

A list of tags (keys and values) that are used to specify the associated resources.

" @@ -622,7 +622,7 @@ "members":{ "ResourceARNList":{ "shape":"ResourceARNList", - "documentation":"

A list of ARNs. An ARN (Amazon Resource Name) uniquely identifies a resource. You can specify a minimum of 1 and a maximum of 20 ARNs (resources) to tag. An ARN can be set to a maximum of 1600 characters. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

A list of ARNs. An ARN (Amazon Resource Name) uniquely identifies a resource. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" }, "Tags":{ "shape":"TagMap", @@ -693,7 +693,7 @@ "members":{ "ResourceARNList":{ "shape":"ResourceARNList", - "documentation":"

A list of ARNs. An ARN (Amazon Resource Name) uniquely identifies a resource. You can specify a minimum of 1 and a maximum of 20 ARNs (resources) to untag. An ARN can be set to a maximum of 1600 characters. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" + "documentation":"

A list of ARNs. An ARN (Amazon Resource Name) uniquely identifies a resource. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

" }, "TagKeys":{ "shape":"TagKeyListForUntag", @@ -711,5 +711,5 @@ } } }, - "documentation":"Resource Groups Tagging API

This guide describes the API operations for the resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

  • Tag and untag supported resources located in the specified Region for the AWS account.

  • Use tag-based filters to search for resources located in the specified Region for the AWS account.

  • List all existing tag keys in the specified Region for the AWS account.

  • List all existing values for the specified key in the specified Region for the AWS account.

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

  • tag:GetResources

  • tag:TagResources

  • tag:UntagResources

  • tag:GetTagKeys

  • tag:GetTagValues

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

  • Alexa for Business (a4b)

  • API Gateway

  • Amazon AppStream

  • AWS AppSync

  • AWS App Mesh

  • Amazon Athena

  • Amazon Aurora

  • AWS Backup

  • AWS Certificate Manager

  • AWS Certificate Manager Private CA

  • Amazon Cloud Directory

  • AWS CloudFormation

  • Amazon CloudFront

  • AWS CloudHSM

  • AWS CloudTrail

  • Amazon CloudWatch (alarms only)

  • Amazon CloudWatch Events

  • Amazon CloudWatch Logs

  • AWS CodeBuild

  • AWS CodeCommit

  • AWS CodePipeline

  • AWS CodeStar

  • Amazon Cognito Identity

  • Amazon Cognito User Pools

  • Amazon Comprehend

  • AWS Config

  • AWS Data Exchange

  • AWS Data Pipeline

  • AWS Database Migration Service

  • AWS DataSync

  • AWS Device Farm

  • AWS Direct Connect

  • AWS Directory Service

  • Amazon DynamoDB

  • Amazon EBS

  • Amazon EC2

  • Amazon ECR

  • Amazon ECS

  • Amazon EKS

  • AWS Elastic Beanstalk

  • Amazon Elastic File System

  • Elastic Load Balancing

  • Amazon ElastiCache

  • Amazon Elasticsearch Service

  • AWS Elemental MediaLive

  • AWS Elemental MediaPackage

  • AWS Elemental MediaTailor

  • Amazon EMR

  • Amazon FSx

  • Amazon S3 Glacier

  • AWS Glue

  • Amazon GuardDuty

  • Amazon Inspector

  • AWS IoT Analytics

  • AWS IoT Core

  • AWS IoT Device Defender

  • AWS IoT Device Management

  • AWS IoT Events

  • AWS IoT Greengrass

  • AWS IoT 1-Click

  • AWS Key Management Service

  • Amazon Kinesis

  • Amazon Kinesis Data Analytics

  • Amazon Kinesis Data Firehose

  • AWS Lambda

  • AWS License Manager

  • Amazon Machine Learning

  • Amazon MQ

  • Amazon MSK

  • Amazon Neptune

  • AWS OpsWorks

  • AWS Organizations

  • Amazon Quantum Ledger Database (QLDB)

  • Amazon RDS

  • Amazon Redshift

  • AWS Resource Access Manager

  • AWS Resource Groups

  • AWS RoboMaker

  • Amazon Route 53

  • Amazon Route 53 Resolver

  • Amazon S3 (buckets only)

  • Amazon SageMaker

  • AWS Secrets Manager

  • AWS Security Hub

  • AWS Service Catalog

  • Amazon Simple Notification Service (SNS)

  • Amazon Simple Queue Service (SQS)

  • Amazon Simple Workflow Service

  • AWS Step Functions

  • AWS Storage Gateway

  • AWS Systems Manager

  • AWS Transfer for SFTP

  • Amazon VPC

  • Amazon WorkSpaces

" + "documentation":"Resource Groups Tagging API

This guide describes the API operations for the resource groups tagging.

A tag is a label that you assign to an AWS resource. A tag consists of a key and a value, both of which you define. For example, if you have two Amazon EC2 instances, you might assign both a tag key of \"Stack.\" But the value of \"Stack\" might be \"Testing\" for one and \"Production\" for the other.

Tagging can help you organize your resources and enables you to simplify resource management, access management and cost allocation.

You can use the resource groups tagging API operations to complete the following tasks:

  • Tag and untag supported resources located in the specified Region for the AWS account.

  • Use tag-based filters to search for resources located in the specified Region for the AWS account.

  • List all existing tag keys in the specified Region for the AWS account.

  • List all existing values for the specified key in the specified Region for the AWS account.

To use resource groups tagging API operations, you must add the following permissions to your IAM policy:

  • tag:GetResources

  • tag:TagResources

  • tag:UntagResources

  • tag:GetTagKeys

  • tag:GetTagValues

You'll also need permissions to access the resources of individual services so that you can tag and untag those resources.

For more information on IAM policies, see Managing IAM Policies in the IAM User Guide.

You can use the Resource Groups Tagging API to tag resources for the following AWS services.

  • Alexa for Business (a4b)

  • API Gateway

  • Amazon AppStream

  • AWS AppSync

  • AWS App Mesh

  • Amazon Athena

  • Amazon Aurora

  • AWS Backup

  • AWS Certificate Manager

  • AWS Certificate Manager Private CA

  • Amazon Cloud Directory

  • AWS CloudFormation

  • Amazon CloudFront

  • AWS CloudHSM

  • AWS CloudTrail

  • Amazon CloudWatch (alarms only)

  • Amazon CloudWatch Events

  • Amazon CloudWatch Logs

  • AWS CodeBuild

  • AWS CodeCommit

  • AWS CodePipeline

  • AWS CodeStar

  • Amazon Cognito Identity

  • Amazon Cognito User Pools

  • Amazon Comprehend

  • AWS Config

  • AWS Data Exchange

  • AWS Data Pipeline

  • AWS Database Migration Service

  • AWS DataSync

  • AWS Device Farm

  • AWS Direct Connect

  • AWS Directory Service

  • Amazon DynamoDB

  • Amazon EBS

  • Amazon EC2

  • Amazon ECR

  • Amazon ECS

  • Amazon EKS

  • AWS Elastic Beanstalk

  • Amazon Elastic File System

  • Elastic Load Balancing

  • Amazon ElastiCache

  • Amazon Elasticsearch Service

  • AWS Elemental MediaLive

  • AWS Elemental MediaPackage

  • AWS Elemental MediaTailor

  • Amazon EMR

  • Amazon FSx

  • Amazon S3 Glacier

  • AWS Glue

  • Amazon GuardDuty

  • Amazon Inspector

  • AWS IoT Analytics

  • AWS IoT Core

  • AWS IoT Device Defender

  • AWS IoT Device Management

  • AWS IoT Events

  • AWS IoT Greengrass

  • AWS IoT 1-Click

  • AWS IoT Things Graph

  • AWS Key Management Service

  • Amazon Kinesis

  • Amazon Kinesis Data Analytics

  • Amazon Kinesis Data Firehose

  • AWS Lambda

  • AWS License Manager

  • Amazon Machine Learning

  • Amazon MQ

  • Amazon MSK

  • Amazon Neptune

  • AWS OpsWorks

  • AWS Organizations

  • Amazon Quantum Ledger Database (QLDB)

  • Amazon RDS

  • Amazon Redshift

  • AWS Resource Access Manager

  • AWS Resource Groups

  • AWS RoboMaker

  • Amazon Route 53

  • Amazon Route 53 Resolver

  • Amazon S3 (buckets only)

  • Amazon SageMaker

  • AWS Secrets Manager

  • AWS Security Hub

  • AWS Service Catalog

  • Amazon Simple Email Service (SES)

  • Amazon Simple Notification Service (SNS)

  • Amazon Simple Queue Service (SQS)

  • Amazon Simple Workflow Service

  • AWS Step Functions

  • AWS Storage Gateway

  • AWS Systems Manager

  • AWS Transfer for SFTP

  • AWS WAF Regional

  • Amazon VPC

  • Amazon WorkSpaces

" } diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index b5dcd10edef8..f8dd574d2397 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/robomaker/src/main/resources/codegen-resources/service-2.json b/services/robomaker/src/main/resources/codegen-resources/service-2.json index bde7e0e5d485..0bac97f4d804 100644 --- a/services/robomaker/src/main/resources/codegen-resources/service-2.json +++ b/services/robomaker/src/main/resources/codegen-resources/service-2.json @@ -783,6 +783,26 @@ "min":1, "pattern":"[a-zA-Z0-9_.\\-]*" }, + "Compute":{ + "type":"structure", + "members":{ + "simulationUnitLimit":{ + "shape":"SimulationUnit", + "documentation":"

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vCPU and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided.

" + } + }, + "documentation":"

Compute information for the simulation job.

" + }, + "ComputeResponse":{ + "type":"structure", + "members":{ + "simulationUnitLimit":{ + "shape":"SimulationUnit", + "documentation":"

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vCPU and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided.

" + } + }, + "documentation":"

Compute information for the simulation job

" + }, "ConcurrentDeploymentException":{ "type":"structure", "members":{ @@ -1250,6 +1270,10 @@ "vpcConfig":{ "shape":"VPCConfig", "documentation":"

If your simulation job accesses resources in a VPC, you provide this parameter identifying the list of security group IDs and subnet IDs. These must belong to the same VPC. You must provide at least one security group and one subnet ID.

" + }, + "compute":{ + "shape":"Compute", + "documentation":"

Compute information for the simulation job.

" } } }, @@ -1328,6 +1352,10 @@ "vpcConfig":{ "shape":"VPCConfigResponse", "documentation":"

Information about the vpc configuration.

" + }, + "compute":{ + "shape":"ComputeResponse", + "documentation":"

Compute information for the simulation job.

" } } }, @@ -2075,6 +2103,10 @@ "networkInterface":{ "shape":"NetworkInterface", "documentation":"

The network interface information for the simulation job.

" + }, + "compute":{ + "shape":"ComputeResponse", + "documentation":"

Compute information for the simulation job.

" } } }, @@ -3191,6 +3223,10 @@ "networkInterface":{ "shape":"NetworkInterface", "documentation":"

Information about a network interface.

" + }, + "compute":{ + "shape":"ComputeResponse", + "documentation":"

Compute information for the simulation job

" } }, "documentation":"

Information about a simulation job.

" @@ -3318,6 +3354,10 @@ "documentation":"

Specify data sources to mount read-only files from S3 into your simulation. These files are available under /opt/robomaker/datasources/data_source_name.

There is a limit of 100 files and a combined size of 25GB for all DataSourceConfig objects.

" }, "vpcConfig":{"shape":"VPCConfig"}, + "compute":{ + "shape":"Compute", + "documentation":"

Compute information for the simulation job

" + }, "tags":{ "shape":"TagMap", "documentation":"

A map that contains tag keys and tag values that are attached to the simulation job request.

" @@ -3412,6 +3452,11 @@ "pattern":"7|9|Kinetic|Melodic|Dashing" }, "SimulationTimeMillis":{"type":"long"}, + "SimulationUnit":{ + "type":"integer", + "max":15, + "min":1 + }, "Source":{ "type":"structure", "members":{ diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 50e72875a5ae..bb75751c8edb 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53/src/main/resources/codegen-resources/paginators-1.json b/services/route53/src/main/resources/codegen-resources/paginators-1.json index 5a7cea3967cf..418ed85db220 100644 --- a/services/route53/src/main/resources/codegen-resources/paginators-1.json +++ b/services/route53/src/main/resources/codegen-resources/paginators-1.json @@ -14,6 +14,12 @@ "output_token": "NextMarker", "result_key": "HostedZones" }, + "ListQueryLoggingConfigs": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "QueryLoggingConfigs" + }, "ListResourceRecordSets": { "input_token": [ "StartRecordName", diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index f39907861384..38313f8d315a 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,8 @@ {"shape":"InvalidInput"}, {"shape":"PublicZoneVPCAssociation"}, {"shape":"ConflictingDomainExists"}, - {"shape":"LimitsExceeded"} + {"shape":"LimitsExceeded"}, + {"shape":"PriorRequestNotComplete"} ], "documentation":"

Associates an Amazon VPC with a private hosted zone.

To perform the association, the VPC and the private hosted zone must already exist. Also, you can't convert a public hosted zone into a private hosted zone.

If you want to associate a VPC that was created by one AWS account with a private hosted zone that was created by a different account, do one of the following:

  • Use the AWS account that created the private hosted zone to submit a CreateVPCAssociationAuthorization request. Then use the account that created the VPC to submit an AssociateVPCWithHostedZone request.

  • If a subnet in the VPC was shared with another account, you can use the account that the subnet was shared with to submit an AssociateVPCWithHostedZone request. For more information about sharing subnets, see Working with Shared VPCs.

" }, @@ -54,7 +55,7 @@ {"shape":"InvalidInput"}, {"shape":"PriorRequestNotComplete"} ], - "documentation":"

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. When using the Amazon Route 53 API to change resource record sets, Route 53 either makes all or none of the changes in a change batch request. This ensures that Route 53 never partially implements the intended changes to the resource record sets in a hosted zone.

For example, a change batch request that deletes the CNAME record for www.example.com and creates an alias resource record set for www.example.com. Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If either the DELETE or the CREATE action fails, then both changes (plus any other changes in the batch) fail, and the original CNAME record continues to exist.

Due to the nature of transactional changes, you can't delete the same resource record set more than once in a single change batch. If you attempt to delete the same change batch more than once, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordsSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource record set does not already exist, AWS creates it. If a resource set does exist, Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

" + "documentation":"

Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

Deleting Resource Record Sets

To delete a resource record set, you must specify all the same values that you specified when you created it.

Change Batches and Transactional Changes

The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

Traffic Flow

To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

Create, Delete, and Upsert

Use ChangeResourceRecordSetsRequest to perform the following actions:

  • CREATE: Creates a resource record set that has the specified values.

  • DELETE: Deletes an existing resource record set that has the specified values.

  • UPSERT: If a resource record set does not already exist, AWS creates it. If a resource record set does exist, Route 53 updates it with the values in the request.

Syntaxes for Creating, Updating, and Deleting Resource Record Sets

The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

For an example for each type of resource record set, see \"Examples.\"

Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

Change Propagation to Route 53 DNS Servers

When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

Limits on ChangeResourceRecordSets Requests

For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

" }, "ChangeTagsForResource":{ "name":"ChangeTagsForResource", @@ -390,7 +391,7 @@ {"shape":"LastVPCAssociation"}, {"shape":"InvalidInput"} ], - "documentation":"

Disassociates a VPC from a Amazon Route 53 private hosted zone. Note the following:

  • You can't disassociate the last VPC from a private hosted zone.

  • You can't convert a private hosted zone into a public hosted zone.

  • You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the VPC.

" + "documentation":"

Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:

  • You can't disassociate the last Amazon VPC from a private hosted zone.

  • You can't convert a private hosted zone into a public hosted zone.

  • You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the Amazon VPC.

  • Some services, such as AWS Cloud Map and Amazon Elastic File System (Amazon EFS), automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.

    When you run DisassociateVPCFromHostedZone, if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone. If the hosted zone has a value for OwningService, you can't use DisassociateVPCFromHostedZone.

" }, "GetAccountLimit":{ "name":"GetAccountLimit", @@ -675,6 +676,20 @@ ], "documentation":"

Retrieves a list of your hosted zones in lexicographic order. The response includes a HostedZones child element for each hosted zone created by the current AWS account.

ListHostedZonesByName sorts hosted zones by name with the labels reversed. For example:

com.example.www.

Note the trailing dot, which can change the sort order in some circumstances.

If the domain name includes escape characters or Punycode, ListHostedZonesByName alphabetizes the domain name using the escaped or Punycoded value, which is the format that Amazon Route 53 saves in its database. For example, to create a hosted zone for exämple.com, you specify ex\\344mple.com for the domain name. ListHostedZonesByName alphabetizes it as:

com.ex\\344mple.

The labels are reversed and alphabetized using the escaped value. For more information about valid domain name formats, including internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

Route 53 returns up to 100 items in each response. If you have a lot of hosted zones, use the MaxItems parameter to list them in groups of up to 100. The response includes values that help navigate from one group of MaxItems hosted zones to the next:

  • The DNSName and HostedZoneId elements in the response contain the values, if any, specified for the dnsname and hostedzoneid parameters in the request that produced the current response.

  • The MaxItems element in the response contains the value, if any, that you specified for the maxitems parameter in the request that produced the current response.

  • If the value of IsTruncated in the response is true, there are more hosted zones associated with the current AWS account.

    If IsTruncated is false, this response includes the last hosted zone that is associated with the current account. The NextDNSName element and NextHostedZoneId elements are omitted from the response.

  • The NextDNSName and NextHostedZoneId elements in the response contain the domain name and the hosted zone ID of the next hosted zone that is associated with the current AWS account. If you want to list more hosted zones, make another call to ListHostedZonesByName, and specify the value of NextDNSName and NextHostedZoneId in the dnsname and hostedzoneid parameters, respectively.

" }, + "ListHostedZonesByVPC":{ + "name":"ListHostedZonesByVPC", + "http":{ + "method":"GET", + "requestUri":"/2013-04-01/hostedzonesbyvpc" + }, + "input":{"shape":"ListHostedZonesByVPCRequest"}, + "output":{"shape":"ListHostedZonesByVPCResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"InvalidPaginationToken"} + ], + "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which AWS account or AWS service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values:

  • An OwningAccount element, which contains the account number of either the current AWS account or another AWS account. Some services, such as AWS Cloud Map, create hosted zones using the current account.

  • An OwningService element, which identifies the AWS service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com.

" + }, "ListQueryLoggingConfigs":{ "name":"ListQueryLoggingConfigs", "http":{ @@ -934,6 +949,7 @@ } }, "shapes":{ + "AWSAccountID":{"type":"string"}, "AccountLimit":{ "type":"structure", "required":[ @@ -1278,7 +1294,13 @@ "eu-north-1", "sa-east-1", "cn-northwest-1", - "cn-north-1" + "cn-north-1", + "af-south-1", + "eu-south-1", + "us-gov-west-1", + "us-gov-east-1", + "us-iso-east-1", + "us-isob-east-1" ], "max":64, "min":1 @@ -1309,7 +1331,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

The cause of this error depends on whether you're trying to create a public or a private hosted zone:

  • Public hosted zone: Two hosted zones that have the same name or that have a parent/child relationship (example.com and test.example.com) can't have any common name servers. You tried to create a hosted zone that has the same name as an existing hosted zone or that's the parent or child of an existing hosted zone, and you specified a delegation set that shares one or more name servers with the existing hosted zone. For more information, see CreateReusableDelegationSet.

  • Private hosted zone: You specified an Amazon VPC that you're already using for another hosted zone, and the domain that you specified for one of the hosted zones is a subdomain of the domain that you specified for the other hosted zone. For example, you can't use the same Amazon VPC for the hosted zones for example.com and test.example.com.

", + "documentation":"

The cause of this error depends on the operation that you're performing:

  • Create a public hosted zone: Two hosted zones that have the same name or that have a parent/child relationship (example.com and test.example.com) can't have any common name servers. You tried to create a hosted zone that has the same name as an existing hosted zone or that's the parent or child of an existing hosted zone, and you specified a delegation set that shares one or more name servers with the existing hosted zone. For more information, see CreateReusableDelegationSet.

  • Create a private hosted zone: A hosted zone with the specified name already exists and is already associated with the Amazon VPC that you specified.

  • Associate VPCs with a private hosted zone: The VPC that you specified is already associated with another hosted zone that has the same name.

", "exception":true }, "ConflictingTypes":{ @@ -2891,7 +2913,55 @@ "documentation":"

The specified hosted zone is a public hosted zone, not a private hosted zone.

", "exception":true }, + "HostedZoneOwner":{ + "type":"structure", + "members":{ + "OwningAccount":{ + "shape":"AWSAccountID", + "documentation":"

If the hosted zone was created by an AWS account, or was created by an AWS service that creates hosted zones using the current account, OwningAccount contains the account ID of that account. For example, when you use AWS Cloud Map to create a hosted zone, Cloud Map creates the hosted zone using the current AWS account.

" + }, + "OwningService":{ + "shape":"HostedZoneOwningService", + "documentation":"

If an AWS service uses its own account to create a hosted zone and associate the specified VPC with that hosted zone, OwningService contains an abbreviation that identifies the service. For example, if Amazon Elastic File System (Amazon EFS) created a hosted zone and associated a VPC with the hosted zone, the value of OwningService is efs.amazonaws.com.

" + } + }, + "documentation":"

A complex type that identifies a hosted zone that a specified Amazon VPC is associated with and the owner of the hosted zone. If there is a value for OwningAccount, there is no value for OwningService, and vice versa.

" + }, + "HostedZoneOwningService":{ + "type":"string", + "max":128 + }, "HostedZoneRRSetCount":{"type":"long"}, + "HostedZoneSummaries":{ + "type":"list", + "member":{ + "shape":"HostedZoneSummary", + "locationName":"HostedZoneSummary" + } + }, + "HostedZoneSummary":{ + "type":"structure", + "required":[ + "HostedZoneId", + "Name", + "Owner" + ], + "members":{ + "HostedZoneId":{ + "shape":"ResourceId", + "documentation":"

The Route 53 hosted zone ID of a private hosted zone that the specified VPC is associated with.

" + }, + "Name":{ + "shape":"DNSName", + "documentation":"

The name of the private hosted zone, such as example.com.

" + }, + "Owner":{ + "shape":"HostedZoneOwner", + "documentation":"

The owner of a private hosted zone that the specified VPC is associated with. The owner can be either an AWS account or an AWS service.

" + } + }, + "documentation":"

In the response to a ListHostedZonesByVPC request, the HostedZoneSummaries element contains one HostedZoneSummary element for each hosted zone that the specified Amazon VPC is associated with. Each HostedZoneSummary element contains the hosted zone name and ID, and information about who owns the hosted zone.

" + }, "HostedZones":{ "type":"list", "member":{ @@ -3232,6 +3302,61 @@ }, "documentation":"

A complex type that contains the response information for the request.

" }, + "ListHostedZonesByVPCRequest":{ + "type":"structure", + "required":[ + "VPCId", + "VPCRegion" + ], + "members":{ + "VPCId":{ + "shape":"VPCId", + "documentation":"

The ID of the Amazon VPC that you want to list hosted zones for.

", + "location":"querystring", + "locationName":"vpcid" + }, + "VPCRegion":{ + "shape":"VPCRegion", + "documentation":"

For the Amazon VPC that you specified for VPCId, the AWS Region that you created the VPC in.

", + "location":"querystring", + "locationName":"vpcregion" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "documentation":"

(Optional) The maximum number of hosted zones that you want Amazon Route 53 to return. If the specified VPC is associated with more than MaxItems hosted zones, the response includes a NextToken element. NextToken contains the hosted zone ID of the first hosted zone that Route 53 will return if you submit another request.

", + "location":"querystring", + "locationName":"maxitems" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

If the previous response included a NextToken element, the specified VPC is associated with more hosted zones. To get more hosted zones, submit another ListHostedZonesByVPC request.

For the value of NextToken, specify the value of NextToken from the previous response.

If the previous response didn't include a NextToken element, there are no more hosted zones to get.

", + "location":"querystring", + "locationName":"nexttoken" + } + }, + "documentation":"

Lists all the private hosted zones that a specified VPC is associated with, regardless of which AWS account created the hosted zones.

" + }, + "ListHostedZonesByVPCResponse":{ + "type":"structure", + "required":[ + "HostedZoneSummaries", + "MaxItems" + ], + "members":{ + "HostedZoneSummaries":{ + "shape":"HostedZoneSummaries", + "documentation":"

A list that contains one HostedZoneSummary element for each hosted zone that the specified Amazon VPC is associated with. Each HostedZoneSummary element contains the hosted zone name and ID, and information about who owns the hosted zone.

" + }, + "MaxItems":{ + "shape":"PageMaxItems", + "documentation":"

The value that you specified for MaxItems in the most recent ListHostedZonesByVPC request.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The value that you specified for NextToken in the most recent ListHostedZonesByVPC request.

" + } + } + }, "ListHostedZonesRequest":{ "type":"structure", "members":{ @@ -4008,7 +4133,7 @@ "PageTruncated":{"type":"boolean"}, "PaginationToken":{ "type":"string", - "max":256 + "max":1024 }, "Period":{ "type":"integer", @@ -4263,7 +4388,9 @@ "cn-northwest-1", "ap-east-1", "me-south-1", - "ap-south-1" + "ap-south-1", + "af-south-1", + "eu-south-1" ], "max":64, "min":1 @@ -5072,6 +5199,10 @@ "eu-central-1", "ap-east-1", "me-south-1", + "us-gov-west-1", + "us-gov-east-1", + "us-iso-east-1", + "us-isob-east-1", "ap-southeast-1", "ap-southeast-2", "ap-south-1", @@ -5081,7 +5212,9 @@ "eu-north-1", "sa-east-1", "ca-central-1", - "cn-north-1" + "cn-north-1", + "af-south-1", + "eu-south-1" ], "max":64, "min":1 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 98aee35662db..4ae59449a4b1 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index 4a7dfc942e1a..dd083a2acfe8 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,35 @@ "uid":"route53domains-2014-05-15" }, "operations":{ + "AcceptDomainTransferFromAnotherAwsAccount":{ + "name":"AcceptDomainTransferFromAnotherAwsAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptDomainTransferFromAnotherAwsAccountRequest"}, + "output":{"shape":"AcceptDomainTransferFromAnotherAwsAccountResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"DomainLimitExceeded"} + ], + "documentation":"

Accepts the transfer of a domain from another AWS account to the current AWS account. You initiate a transfer between AWS accounts using TransferDomainToAnotherAwsAccount.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

" + }, + "CancelDomainTransferToAnotherAwsAccount":{ + "name":"CancelDomainTransferToAnotherAwsAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelDomainTransferToAnotherAwsAccountRequest"}, + "output":{"shape":"CancelDomainTransferToAnotherAwsAccountResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"} + ], + "documentation":"

Cancels the transfer of a domain from the current AWS account to another AWS account. You initiate a transfer between AWS accounts using TransferDomainToAnotherAwsAccount.

You must cancel the transfer before the other AWS account accepts the transfer using AcceptDomainTransferFromAnotherAwsAccount.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

" + }, "CheckDomainAvailability":{ "name":"CheckDomainAvailability", "http":{ @@ -99,7 +128,7 @@ {"shape":"UnsupportedTLD"}, {"shape":"TLDRulesViolation"} ], - "documentation":"

This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.

The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see \"Renewal, restoration, and deletion times\" on the website for our registrar associate, Gandi. Amazon Route 53 requires that you renew before the end of the renewal period that is listed on the Gandi website so we can complete processing before the deadline.

" + "documentation":"

This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. The cost of renewing your domain registration is billed to your AWS account.

The period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see Domains That You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide. Route 53 requires that you renew before the end of the renewal period so we can complete processing before the deadline.

" }, "EnableDomainTransferLock":{ "name":"EnableDomainTransferLock", @@ -159,7 +188,7 @@ {"shape":"InvalidInput"}, {"shape":"UnsupportedTLD"} ], - "documentation":"

The GetDomainSuggestions operation returns a list of suggested domain names given a string, which can either be a domain name or simply a word or phrase (without spaces).

" + "documentation":"

The GetDomainSuggestions operation returns a list of suggested domain names.

" }, "GetOperationDetail":{ "name":"GetOperationDetail", @@ -198,7 +227,7 @@ "errors":[ {"shape":"InvalidInput"} ], - "documentation":"

This operation returns the operation IDs of operations that are not yet complete.

" + "documentation":"

Returns information about all of the operations that return an operation ID and that have ever been performed on domains that were registered by the current account.

" }, "ListTagsForDomain":{ "name":"ListTagsForDomain", @@ -231,7 +260,21 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

This operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.

When you register a domain, Amazon Route 53 does the following:

  • Creates a Amazon Route 53 hosted zone that has the same name as the domain. Amazon Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

  • Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

  • Optionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, WHOIS queries return the information that you entered for the registrant, admin, and tech contacts.

  • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

  • Charges your AWS account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

" + "documentation":"

This operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.

When you register a domain, Amazon Route 53 does the following:

  • Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name servers.

  • Enables autorenew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration.

  • Optionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, WHOIS queries return the information that you entered for the registrant, admin, and tech contacts.

  • If registration is successful, returns an operation ID that you can use to track the progress and completion of the action. If the request is not completed successfully, the domain registrant is notified by email.

  • Charges your AWS account an amount based on the top-level domain. For more information, see Amazon Route 53 Pricing.

" + }, + "RejectDomainTransferFromAnotherAwsAccount":{ + "name":"RejectDomainTransferFromAnotherAwsAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectDomainTransferFromAnotherAwsAccountRequest"}, + "output":{"shape":"RejectDomainTransferFromAnotherAwsAccountResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"} + ], + "documentation":"

Rejects the transfer of a domain from another AWS account to the current AWS account. You initiate a transfer between AWS accounts using TransferDomainToAnotherAwsAccount.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

" }, "RenewDomain":{ "name":"RenewDomain", @@ -248,7 +291,7 @@ {"shape":"TLDRulesViolation"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your AWS account.

We recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the expiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see Renewing Registration for a Domain in the Amazon Route 53 Developer Guide.

" + "documentation":"

This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your AWS account.

We recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the expiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see Renewing Registration for a Domain in the Amazon Route 53 Developer Guide.

" }, "ResendContactReachabilityEmail":{ "name":"ResendContactReachabilityEmail", @@ -295,7 +338,22 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

This operation transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).

For transfer requirements, a detailed procedure, and information about viewing the status of a domain transfer, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route 53 Developer Guide.

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you consider transferring your DNS service to Amazon Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

" + "documentation":"

Transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).

For more information about transferring domains, see the following topics:

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

" + }, + "TransferDomainToAnotherAwsAccount":{ + "name":"TransferDomainToAnotherAwsAccount", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TransferDomainToAnotherAwsAccountRequest"}, + "output":{"shape":"TransferDomainToAnotherAwsAccountResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"OperationLimitExceeded"}, + {"shape":"DuplicateRequest"} + ], + "documentation":"

Transfers a domain from the current AWS account to another AWS account. Note the following:

When you transfer a domain from one AWS account to another, Route 53 doesn't transfer the hosted zone that is associated with the domain. DNS resolution isn't affected if the domain and the hosted zone are owned by separate accounts, so transferring the hosted zone is optional. For information about transferring the hosted zone to another AWS account, see Migrating a Hosted Zone to a Different AWS Account in the Amazon Route 53 Developer Guide.

Use either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled.

" }, "UpdateDomainContact":{ "name":"UpdateDomainContact", @@ -329,7 +387,7 @@ {"shape":"OperationLimitExceeded"}, {"shape":"UnsupportedTLD"} ], - "documentation":"

This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, contact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact information for our registrar associate, Gandi.

This operation affects only the contact information for the specified contact type (registrant, administrator, or tech). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

" + "documentation":"

This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, contact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact information for our registrar associate, Gandi.

This operation affects only the contact information for the specified contact type (registrant, administrator, or tech). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.

By disabling the privacy service via API, you consent to the publication of the contact information provided for this domain via the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. You may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy or the Route 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. For more information on our privacy practices, see https://aws.amazon.com/privacy/.

" }, "UpdateDomainNameservers":{ "name":"UpdateDomainNameservers", @@ -378,6 +436,38 @@ } }, "shapes":{ + "AcceptDomainTransferFromAnotherAwsAccountRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Password" + ], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that was specified when another AWS account submitted a TransferDomainToAnotherAwsAccount request.

" + }, + "Password":{ + "shape":"String", + "documentation":"

The password that was returned by the TransferDomainToAnotherAwsAccount request.

" + } + }, + "documentation":"

The AcceptDomainTransferFromAnotherAwsAccount request includes the following elements.

" + }, + "AcceptDomainTransferFromAnotherAwsAccountResponse":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" + } + }, + "documentation":"

The AcceptDomainTransferFromAnotherAwsAccount response includes the following element.

" + }, + "AccountId":{ + "type":"string", + "pattern":"^(\\d{12})$" + }, "AddressLine":{ "type":"string", "max":255 @@ -387,7 +477,7 @@ "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

The name of the domain that the billing record applies to. If the domain name contains characters other than a-z, 0-9, and - (hyphen), such as an internationalized domain name, then this value is in Punycode. For more information, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

" + "documentation":"

The name of the domain that the billing record applies to. If the domain name contains characters other than a-z, 0-9, and - (hyphen), such as an internationalized domain name, then this value is in Punycode. For more information, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.

" }, "Operation":{ "shape":"OperationType", @@ -413,13 +503,34 @@ "member":{"shape":"BillingRecord"} }, "Boolean":{"type":"boolean"}, + "CancelDomainTransferToAnotherAwsAccountRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain for which you want to cancel the transfer to another AWS account.

" + } + }, + "documentation":"

The CancelDomainTransferToAnotherAwsAccount request includes the following element.

" + }, + "CancelDomainTransferToAnotherAwsAccountResponse":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"

The identifier that TransferDomainToAnotherAwsAccount returned to track the progress of the request. Because the transfer request was canceled, the value is no longer valid, and you can't use GetOperationDetail to query the operation status.

" + } + }, + "documentation":"

The CancelDomainTransferToAnotherAwsAccount response includes the following element.

" + }, "CheckDomainAvailabilityRequest":{ "type":"structure", "required":["DomainName"], "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

The name of the domain that you want to get availability for.

Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

" + "documentation":"

The name of the domain that you want to get availability for. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

The domain name can contain only the following characters:

  • Letters a through z. Domain names are not case sensitive.

  • Numbers 0 through 9.

  • Hyphen (-). You can't specify a hyphen at the beginning or end of a label.

  • Period (.) to separate the labels in the name, such as the . in example.com.

Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use supports internationalized domain names, see Domains that You Can Register with Amazon Route 53. For more information, see Formatting Internationalized Domain Names.

" }, "IdnLangCode":{ "shape":"LangCode", @@ -434,7 +545,7 @@ "members":{ "Availability":{ "shape":"DomainAvailability", - "documentation":"

Whether the domain name is available for registering.

You can register only domains designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" + "documentation":"

Whether the domain name is available for registering.

You can register only domains designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" } }, "documentation":"

The CheckDomainAvailability response includes the following elements.

" @@ -445,7 +556,7 @@ "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

The name of the domain that you want to transfer to Amazon Route 53.

Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

" + "documentation":"

The name of the domain that you want to transfer to Route 53. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

The domain name can contain only the following characters:

  • Letters a through z. Domain names are not case sensitive.

  • Numbers 0 through 9.

  • Hyphen (-). You can't specify a hyphen at the beginning or end of a label.

  • Period (.) to separate the labels in the name, such as the . in example.com.

" }, "AuthCode":{ "shape":"DomainAuthCode", @@ -460,7 +571,7 @@ "members":{ "Transferability":{ "shape":"DomainTransferability", - "documentation":"

A complex type that contains information about whether the specified domain can be transferred to Amazon Route 53.

" + "documentation":"

A complex type that contains information about whether the specified domain can be transferred to Route 53.

" } }, "documentation":"

The CheckDomainTransferability response includes the following elements.

" @@ -482,7 +593,7 @@ }, "ContactType":{ "shape":"ContactType", - "documentation":"

Indicates whether the contact is a person, company, association, or public organization. If you choose an option other than PERSON, you must enter an organization name, and you can't enable privacy protection for the contact.

" + "documentation":"

Indicates whether the contact is a person, company, association, or public organization. Note the following:

  • If you specify a value other than PERSON, you must also specify a value for OrganizationName.

  • For some TLDs, the privacy protection available depends on the value that you specify for Contact Type. For the privacy protection settings for your TLD, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

  • For .es domains, if you specify PERSON, you must specify INDIVIDUAL for the value of ES_LEGAL_FORM.

" }, "OrganizationName":{ "shape":"ContactName", @@ -841,7 +952,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" } }, "documentation":"

The DisableDomainTransferLock response includes the following element.

" @@ -893,7 +1004,7 @@ }, "Availability":{ "shape":"String", - "documentation":"

Whether the domain name is available for registering.

You can register only the domains that are designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Amazon Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" + "documentation":"

Whether the domain name is available for registering.

You can register only the domains that are designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" } }, "documentation":"

Information about one suggested domain name.

" @@ -920,7 +1031,7 @@ }, "Expiry":{ "shape":"Timestamp", - "documentation":"

Expiration date of the domain in Coordinated Universal Time (UTC).

" + "documentation":"

Expiration date of the domain in Unix time format and Coordinated Universal Time (UTC).

" } }, "documentation":"

Summary information about one domain.

" @@ -934,7 +1045,7 @@ "members":{ "Transferable":{"shape":"Transferable"} }, - "documentation":"

A complex type that contains information about whether the specified domain can be transferred to Amazon Route 53.

" + "documentation":"

A complex type that contains information about whether the specified domain can be transferred to Route 53.

" }, "DuplicateRequest":{ "type":"structure", @@ -1003,11 +1114,11 @@ "members":{ "Name":{ "shape":"ExtraParamName", - "documentation":"

Name of the additional parameter required by the top-level domain. Here are the top-level domains that require additional parameters and which parameters they require:

  • .com.au and .net.au: AU_ID_NUMBER and AU_ID_TYPE

  • .ca: BRAND_NUMBER, CA_LEGAL_TYPE, and CA_BUSINESS_ENTITY_TYPE

  • .es: ES_IDENTIFICATION, ES_IDENTIFICATION_TYPE, and ES_LEGAL_FORM

  • .fi: BIRTH_DATE_IN_YYYY_MM_DD, FI_BUSINESS_NUMBER, FI_ID_NUMBER, FI_NATIONALITY, and FI_ORGANIZATION_TYPE

  • .fr: BRAND_NUMBER, BIRTH_DEPARTMENT, BIRTH_DATE_IN_YYYY_MM_DD, BIRTH_COUNTRY, and BIRTH_CITY

  • .it: BIRTH_COUNTRY, IT_PIN, and IT_REGISTRANT_ENTITY_TYPE

  • .ru: BIRTH_DATE_IN_YYYY_MM_DD and RU_PASSPORT_DATA

  • .se: BIRTH_COUNTRY and SE_ID_NUMBER

  • .sg: SG_ID_NUMBER

  • .co.uk, .me.uk, and .org.uk: UK_CONTACT_TYPE and UK_COMPANY_NUMBER

In addition, many TLDs require VAT_NUMBER.

" + "documentation":"

The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require additional parameters and the names of the parameters that they require:

.com.au and .net.au
  • AU_ID_NUMBER

  • AU_ID_TYPE

    Valid values include the following:

    • ABN (Australian business number)

    • ACN (Australian company number)

    • TM (Trademark number)

.ca
  • BRAND_NUMBER

  • CA_BUSINESS_ENTITY_TYPE

    Valid values include the following:

    • BANK (Bank)

    • COMMERCIAL_COMPANY (Commercial company)

    • COMPANY (Company)

    • COOPERATION (Cooperation)

    • COOPERATIVE (Cooperative)

    • COOPRIX (Cooprix)

    • CORP (Corporation)

    • CREDIT_UNION (Credit union)

    • FOMIA (Federation of mutual insurance associations)

    • INC (Incorporated)

    • LTD (Limited)

    • LTEE (Limitée)

    • LLC (Limited liability corporation)

    • LLP (Limited liability partnership)

    • LTE (Lte.)

    • MBA (Mutual benefit association)

    • MIC (Mutual insurance company)

    • NFP (Not-for-profit corporation)

    • SA (S.A.)

    • SAVINGS_COMPANY (Savings company)

    • SAVINGS_UNION (Savings union)

    • SARL (Société à responsabilité limitée)

    • TRUST (Trust)

    • ULC (Unlimited liability corporation)

  • CA_LEGAL_TYPE

    When ContactType is PERSON, valid values include the following:

    • ABO (Aboriginal Peoples indigenous to Canada)

    • CCT (Canadian citizen)

    • LGR (Legal Representative of a Canadian Citizen or Permanent Resident)

    • RES (Permanent resident of Canada)

    When ContactType is a value other than PERSON, valid values include the following:

    • ASS (Canadian unincorporated association)

    • CCO (Canadian corporation)

    • EDU (Canadian educational institution)

    • GOV (Government or government entity in Canada)

    • HOP (Canadian Hospital)

    • INB (Indian Band recognized by the Indian Act of Canada)

    • LAM (Canadian Library, Archive, or Museum)

    • MAJ (Her/His Majesty the Queen/King)

    • OMK (Official mark registered in Canada)

    • PLT (Canadian Political Party)

    • PRT (Partnership Registered in Canada)

    • TDM (Trademark registered in Canada)

    • TRD (Canadian Trade Union)

    • TRS (Trust established in Canada)

.es
  • ES_IDENTIFICATION

    Specify the applicable value:

    • For contacts inside Spain: Enter your passport ID.

    • For contacts outside of Spain: Enter the VAT identification number for the company.

      For .es domains, the value of ContactType must be PERSON.

  • ES_IDENTIFICATION_TYPE

    Valid values include the following:

    • DNI_AND_NIF (For Spanish contacts)

    • NIE (For foreigners with legal residence)

    • OTHER (For contacts outside of Spain)

  • ES_LEGAL_FORM

    Valid values include the following:

    • ASSOCIATION

    • CENTRAL_GOVERNMENT_BODY

    • CIVIL_SOCIETY

    • COMMUNITY_OF_OWNERS

    • COMMUNITY_PROPERTY

    • CONSULATE

    • COOPERATIVE

    • DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL

    • ECONOMIC_INTEREST_GROUP

    • EMBASSY

    • ENTITY_MANAGING_NATURAL_AREAS

    • FARM_PARTNERSHIP

    • FOUNDATION

    • GENERAL_AND_LIMITED_PARTNERSHIP

    • GENERAL_PARTNERSHIP

    • INDIVIDUAL

    • LIMITED_COMPANY

    • LOCAL_AUTHORITY

    • LOCAL_PUBLIC_ENTITY

    • MUTUAL_INSURANCE_COMPANY

    • NATIONAL_PUBLIC_ENTITY

    • ORDER_OR_RELIGIOUS_INSTITUTION

    • OTHERS (Only for contacts outside of Spain)

    • POLITICAL_PARTY

    • PROFESSIONAL_ASSOCIATION

    • PUBLIC_LAW_ASSOCIATION

    • PUBLIC_LIMITED_COMPANY

    • REGIONAL_GOVERNMENT_BODY

    • REGIONAL_PUBLIC_ENTITY

    • SAVINGS_BANK

    • SPANISH_OFFICE

    • SPORTS_ASSOCIATION

    • SPORTS_FEDERATION

    • SPORTS_LIMITED_COMPANY

    • TEMPORARY_ALLIANCE_OF_ENTERPRISES

    • TRADE_UNION

    • WORKER_OWNED_COMPANY

    • WORKER_OWNED_LIMITED_COMPANY

.fi
  • BIRTH_DATE_IN_YYYY_MM_DD

  • FI_BUSINESS_NUMBER

  • FI_ID_NUMBER

  • FI_NATIONALITY

    Valid values include the following:

    • FINNISH

    • NOT_FINNISH

  • FI_ORGANIZATION_TYPE

    Valid values include the following:

    • COMPANY

    • CORPORATION

    • GOVERNMENT

    • INSTITUTION

    • POLITICAL_PARTY

    • PUBLIC_COMMUNITY

    • TOWNSHIP

.fr
  • BIRTH_CITY

  • BIRTH_COUNTRY

  • BIRTH_DATE_IN_YYYY_MM_DD

  • BIRTH_DEPARTMENT: Specify the INSEE code that corresponds with the department where the contact was born. If the contact was born somewhere other than France or its overseas departments, specify 99. For more information, including a list of departments and the corresponding INSEE numbers, see the Wikipedia entry Departments of France.

  • BRAND_NUMBER

.it
  • IT_NATIONALITY

  • IT_PIN

  • IT_REGISTRANT_ENTITY_TYPE

    Valid values include the following:

    • FOREIGNERS

    • FREELANCE_WORKERS (Freelance workers and professionals)

    • ITALIAN_COMPANIES (Italian companies and one-person companies)

    • NON_PROFIT_ORGANIZATIONS

    • OTHER_SUBJECTS

    • PUBLIC_ORGANIZATIONS

.ru
  • BIRTH_DATE_IN_YYYY_MM_DD

  • RU_PASSPORT_DATA

.se
  • BIRTH_COUNTRY

  • SE_ID_NUMBER

.sg
  • SG_ID_NUMBER

.co.uk, .me.uk, and .org.uk
  • UK_CONTACT_TYPE

    Valid values include the following:

    • CRC (UK Corporation by Royal Charter)

    • FCORP (Non-UK Corporation)

    • FIND (Non-UK Individual, representing self)

    • FOTHER (Non-UK Entity that does not fit into any other category)

    • GOV (UK Government Body)

    • IND (UK Individual (representing self))

    • IP (UK Industrial/Provident Registered Company)

    • LLP (UK Limited Liability Partnership)

    • LTD (UK Limited Company)

    • OTHER (UK Entity that does not fit into any other category)

    • PLC (UK Public Limited Company)

    • PTNR (UK Partnership)

    • RCHAR (UK Registered Charity)

    • SCH (UK School)

    • STAT (UK Statutory Body)

    • STRA (UK Sole Trader)

  • UK_COMPANY_NUMBER

In addition, many TLDs require a VAT_NUMBER.

" }, "Value":{ "shape":"ExtraParamValue", - "documentation":"

Values corresponding to the additional parameter names required by some top-level domains.

" + "documentation":"

The value that corresponds with the name of an extra parameter.

" } }, "documentation":"

ExtraParam includes the following elements.

" @@ -1030,6 +1141,8 @@ "AU_ID_TYPE", "CA_LEGAL_TYPE", "CA_BUSINESS_ENTITY_TYPE", + "CA_LEGAL_REPRESENTATIVE", + "CA_LEGAL_REPRESENTATIVE_CAPACITY", "ES_IDENTIFICATION", "ES_IDENTIFICATION_TYPE", "ES_LEGAL_FORM", @@ -1037,6 +1150,7 @@ "FI_ID_NUMBER", "FI_NATIONALITY", "FI_ORGANIZATION_TYPE", + "IT_NATIONALITY", "IT_PIN", "IT_REGISTRANT_ENTITY_TYPE", "RU_PASSPORT_DATA", @@ -1157,19 +1271,19 @@ }, "CreationDate":{ "shape":"Timestamp", - "documentation":"

The date when the domain was created as found in the response to a WHOIS query. The date and time is in Coordinated Universal time (UTC).

" + "documentation":"

The date when the domain was created as found in the response to a WHOIS query. The date and time is in Unix time format and Coordinated Universal time (UTC).

" }, "UpdatedDate":{ "shape":"Timestamp", - "documentation":"

The last updated date of the domain as found in the response to a WHOIS query. The date and time is in Coordinated Universal time (UTC).

" + "documentation":"

The last updated date of the domain as found in the response to a WHOIS query. The date and time is in Unix time format and Coordinated Universal time (UTC).

" }, "ExpirationDate":{ "shape":"Timestamp", - "documentation":"

The date when the registration for the domain is set to expire. The date and time is in Coordinated Universal time (UTC).

" + "documentation":"

The date when the registration for the domain is set to expire. The date and time is in Unix time format and Coordinated Universal time (UTC).

" }, "Reseller":{ "shape":"Reseller", - "documentation":"

Reseller of the domain. Domains registered or transferred using Amazon Route 53 domains will have \"Amazon\" as the reseller.

" + "documentation":"

Reseller of the domain. Domains registered or transferred using Route 53 domains will have \"Amazon\" as the reseller.

" }, "DnsSec":{ "shape":"DNSSec", @@ -1192,15 +1306,15 @@ "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

A domain name that you want to use as the basis for a list of possible domain names. The domain name must contain a top-level domain (TLD), such as .com, that Amazon Route 53 supports. For a list of TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

" + "documentation":"

A domain name that you want to use as the basis for a list of possible domain names. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

The domain name can contain only the following characters:

  • Letters a through z. Domain names are not case sensitive.

  • Numbers 0 through 9.

  • Hyphen (-). You can't specify a hyphen at the beginning or end of a label.

  • Period (.) to separate the labels in the name, such as the . in example.com.

Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use supports internationalized domain names, see Domains that You Can Register with Amazon Route 53.

" }, "SuggestionCount":{ "shape":"Integer", - "documentation":"

The number of suggested domain names that you want Amazon Route 53 to return.

" + "documentation":"

The number of suggested domain names that you want Route 53 to return. Specify a value between 1 and 50.

" }, "OnlyAvailable":{ "shape":"Boolean", - "documentation":"

If OnlyAvailable is true, Amazon Route 53 returns only domain names that are available. If OnlyAvailable is false, Amazon Route 53 returns domain names without checking whether they're available to be registered. To determine whether the domain is available, you can call checkDomainAvailability for each suggestion.

" + "documentation":"

If OnlyAvailable is true, Route 53 returns only domain names that are available. If OnlyAvailable is false, Route 53 returns domain names without checking whether they're available to be registered. To determine whether the domain is available, you can call checkDomainAvailability for each suggestion.

" } } }, @@ -1219,10 +1333,10 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

The identifier for the operation for which you want to get the status. Amazon Route 53 returned the identifier in the response to the original request.

" + "documentation":"

The identifier for the operation for which you want to get the status. Route 53 returned the identifier in the response to the original request.

" } }, - "documentation":"

The GetOperationDetail request includes the following element.

" + "documentation":"

The GetOperationDetail request includes the following element.

" }, "GetOperationDetailResponse":{ "type":"structure", @@ -1276,7 +1390,7 @@ "documentation":"

The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.

" } }, - "documentation":"

The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation that is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.

", + "documentation":"

The requested item is not acceptable. For example, for APIs that accept a domain name, the request might specify a domain name that doesn't belong to the account that submitted the request. For AcceptDomainTransferFromAnotherAwsAccount, the password might be invalid.

", "exception":true }, "InvoiceId":{"type":"string"}, @@ -1318,7 +1432,7 @@ "members":{ "SubmittedSince":{ "shape":"Timestamp", - "documentation":"

An optional parameter that lets you get information about all the operations that you submitted after a specified date and time. Specify the date and time in Coordinated Universal time (UTC).

" + "documentation":"

An optional parameter that lets you get information about all the operations that you submitted after a specified date and time. Specify the date and time in Unix time format and Coordinated Universal time (UTC).

" }, "Marker":{ "shape":"PageMarker", @@ -1462,7 +1576,9 @@ "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", - "PUSH_DOMAIN" + "PUSH_DOMAIN", + "INTERNAL_TRANSFER_OUT_DOMAIN", + "INTERNAL_TRANSFER_IN_DOMAIN" ] }, "PageMarker":{ @@ -1494,7 +1610,7 @@ "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

The domain name that you want to register.

Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

" + "documentation":"

The domain name that you want to register. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

The domain name can contain only the following characters:

  • Letters a through z. Domain names are not case sensitive.

  • Numbers 0 through 9.

  • Hyphen (-). You can't specify a hyphen at the beginning or end of a label.

  • Period (.) to separate the labels in the name, such as the . in example.com.

Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use supports internationalized domain names, see Domains that You Can Register with Amazon Route 53. For more information, see Formatting Internationalized Domain Names.

" }, "IdnLangCode":{ "shape":"LangCode", @@ -1502,7 +1618,7 @@ }, "DurationInYears":{ "shape":"DurationInYears", - "documentation":"

The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Default: 1

" + "documentation":"

The number of years that you want to register the domain for. Domains are registered for a minimum of one year. The maximum period depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Default: 1

" }, "AutoRenew":{ "shape":"Boolean", @@ -1510,15 +1626,15 @@ }, "AdminContact":{ "shape":"ContactDetail", - "documentation":"

Provides detailed contact information.

" + "documentation":"

Provides detailed contact information. For information about the values that you specify for each element, see ContactDetail.

" }, "RegistrantContact":{ "shape":"ContactDetail", - "documentation":"

Provides detailed contact information.

" + "documentation":"

Provides detailed contact information. For information about the values that you specify for each element, see ContactDetail.

" }, "TechContact":{ "shape":"ContactDetail", - "documentation":"

Provides detailed contact information.

" + "documentation":"

Provides detailed contact information. For information about the values that you specify for each element, see ContactDetail.

" }, "PrivacyProtectAdminContact":{ "shape":"Boolean", @@ -1541,7 +1657,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" } }, "documentation":"

The RegisterDomain response includes the following element.

" @@ -1550,6 +1666,27 @@ "RegistrarUrl":{"type":"string"}, "RegistrarWhoIsServer":{"type":"string"}, "RegistryDomainId":{"type":"string"}, + "RejectDomainTransferFromAnotherAwsAccountRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that was specified when another AWS account submitted a TransferDomainToAnotherAwsAccount request.

" + } + }, + "documentation":"

The RejectDomainTransferFromAnotherAwsAccount request includes the following element.

" + }, + "RejectDomainTransferFromAnotherAwsAccountResponse":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"

The identifier that TransferDomainToAnotherAwsAccount returned to track the progress of the request. Because the transfer request was rejected, the value is no longer valid, and you can't use GetOperationDetail to query the operation status.

" + } + }, + "documentation":"

The RejectDomainTransferFromAnotherAwsAccount response includes the following element.

" + }, "RenewDomainRequest":{ "type":"structure", "required":[ @@ -1563,7 +1700,7 @@ }, "DurationInYears":{ "shape":"DurationInYears", - "documentation":"

The number of years that you want to renew the domain for. The maximum number of years depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Default: 1

" + "documentation":"

The number of years that you want to renew the domain for. The maximum number of years depends on the top-level domain. For the range of valid values for your domain, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

Default: 1

" }, "CurrentExpiryYear":{ "shape":"CurrentExpiryYear", @@ -1578,7 +1715,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

The identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" } } }, @@ -1588,7 +1725,7 @@ "members":{ "domainName":{ "shape":"DomainName", - "documentation":"

The name of the domain for which you want Amazon Route 53 to resend a confirmation email to the registrant contact.

" + "documentation":"

The name of the domain for which you want Route 53 to resend a confirmation email to the registrant contact.

" } } }, @@ -1684,7 +1821,7 @@ "members":{ "DomainName":{ "shape":"DomainName", - "documentation":"

The name of the domain that you want to transfer to Amazon Route 53.

Constraints: The domain name can contain only the letters a through z, the numbers 0 through 9, and hyphen (-). Internationalized Domain Names are not supported.

" + "documentation":"

The name of the domain that you want to transfer to Route 53. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon Route 53 Developer Guide.

The domain name can contain only the following characters:

  • Letters a through z. Domain names are not case sensitive.

  • Numbers 0 through 9.

  • Hyphen (-). You can't specify a hyphen at the beginning or end of a label.

  • Period (.) to separate the labels in the name, such as the . in example.com.

" }, "IdnLangCode":{ "shape":"LangCode", @@ -1739,14 +1876,46 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" + } + }, + "documentation":"

The TransferDomain response includes the following element.

" + }, + "TransferDomainToAnotherAwsAccountRequest":{ + "type":"structure", + "required":[ + "DomainName", + "AccountId" + ], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

The name of the domain that you want to transfer from the current AWS account to another account.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The account ID of the AWS account that you want to transfer the domain to, for example, 111122223333.

" + } + }, + "documentation":"

The TransferDomainToAnotherAwsAccount request includes the following elements.

" + }, + "TransferDomainToAnotherAwsAccountResponse":{ + "type":"structure", + "members":{ + "OperationId":{ + "shape":"OperationId", + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" + }, + "Password":{ + "shape":"String", + "documentation":"

To finish transferring a domain to another AWS account, the account that the domain is being transferred to must submit an AcceptDomainTransferFromAnotherAwsAccount request. The request must include the value of the Password element that was returned in the TransferDomainToAnotherAwsAccount response.

" } }, - "documentation":"

The TranserDomain response includes the following element.

" + "documentation":"

The TransferDomainToAnotherAwsAccount response includes the following elements.

" }, "Transferable":{ "type":"string", - "documentation":"

Whether the domain name can be transferred to Amazon Route 53.

You can transfer only domains that have a value of TRANSFERABLE for Transferable.

Valid values:

TRANSFERABLE

The domain name can be transferred to Amazon Route 53.

UNTRANSFERRABLE

The domain name can't be transferred to Amazon Route 53.

DONT_KNOW

Reserved for future use.

", + "documentation":"

Whether the domain name can be transferred to Route 53.

You can transfer only domains that have a value of TRANSFERABLE for Transferable.

Valid values:

TRANSFERABLE

The domain name can be transferred to Route 53.

UNTRANSFERABLE

The domain name can't be transferred to Route 53.

DONT_KNOW

Reserved for future use.

", "enum":[ "TRANSFERABLE", "UNTRANSFERABLE", @@ -1827,7 +1996,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" } }, "documentation":"

The UpdateDomainContact response includes the following element.

" @@ -1861,7 +2030,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.

" + "documentation":"

Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

" } }, "documentation":"

The UpdateDomainNameservers response includes the following element.

" @@ -1891,11 +2060,11 @@ "members":{ "Start":{ "shape":"Timestamp", - "documentation":"

The beginning date and time for the time period for which you want a list of billing records. Specify the date and time in Coordinated Universal time (UTC).

" + "documentation":"

The beginning date and time for the time period for which you want a list of billing records. Specify the date and time in Unix time format and Coordinated Universal time (UTC).

" }, "End":{ "shape":"Timestamp", - "documentation":"

The end date and time for the time period for which you want a list of billing records. Specify the date and time in Coordinated Universal time (UTC).

" + "documentation":"

The end date and time for the time period for which you want a list of billing records. Specify the date and time in Unix time format and Coordinated Universal time (UTC).

" }, "Marker":{ "shape":"PageMarker", diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 57ea19699f70..0fa585e3c8f7 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/s3/pom.xml b/services/s3/pom.xml index d6f98742373d..da135db5c639 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java index 80a2665ce519..fcb78ef29325 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/GetObjectIntegrationTest.java @@ -74,7 +74,6 @@ public void toInputStream() throws Exception { } } - @Test public void toInputStream_loadFromProperties() throws IOException { s3.putObject(b -> b.bucket(BUCKET).key(PROPERTY_KEY), RequestBody.fromString("test: test")); @@ -117,6 +116,13 @@ public void customResponseHandler_InterceptorRecievesResponsePojo() throws Excep } } + @Test + public void contentRangeIsReturnedForRangeRequests() { + ResponseInputStream stream = s3.getObject(getObjectRequest.copy(r -> r.range("bytes=0-1"))); + stream.abort(); + assertThat(stream.response().contentRange()).isEqualTo("bytes 0-1/10000"); + } + private S3Client createClientWithInterceptor(ExecutionInterceptor interceptor) { return s3ClientBuilder().overrideConfiguration(ClientOverrideConfiguration.builder() .addExecutionInterceptor(interceptor) diff --git 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java new file mode 100644 index 000000000000..f9973fed3617 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/GetObjectInterceptor.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.handlers; + +import java.util.Optional; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkResponse; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; + +/** + * Interceptor for {@link GetObjectRequest} messages. 
+ */ +@SdkInternalApi +public class GetObjectInterceptor implements ExecutionInterceptor { + @Override + public SdkResponse modifyResponse(Context.ModifyResponse context, ExecutionAttributes executionAttributes) { + SdkResponse response = context.response(); + if (!(response instanceof GetObjectResponse)) { + return response; + } + + return fixContentRange(response, context.httpResponse()); + } + + /** + * S3 currently returns content-range in two possible headers: Content-Range or x-amz-content-range based on the x-amz-te + * in the request. This will check the x-amz-content-range if the modeled header (Content-Range) wasn't populated. + */ + private SdkResponse fixContentRange(SdkResponse sdkResponse, SdkHttpResponse httpResponse) { + // Use the modeled content range header, if the service returned it. + GetObjectResponse getObjectResponse = (GetObjectResponse) sdkResponse; + if (getObjectResponse.contentRange() != null) { + return getObjectResponse; + } + + // If the service didn't use the modeled content range header, check the x-amz-content-range header. 
+ Optional xAmzContentRange = httpResponse.firstMatchingHeader("x-amz-content-range"); + if (!xAmzContentRange.isPresent()) { + return getObjectResponse; + } + + return getObjectResponse.copy(r -> r.contentRange(xAmzContentRange.get())); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java index 6df831bbbcc0..70f3725ef706 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/presigner/DefaultS3Presigner.java @@ -98,9 +98,13 @@ @SdkInternalApi public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3Presigner { private static final AwsS3V4Signer DEFAULT_SIGNER = AwsS3V4Signer.create(); + private static final S3Configuration DEFAULT_S3_CONFIGURATION = S3Configuration.builder() + .checksumValidationEnabled(false) + .build(); private static final String SERVICE_NAME = "s3"; private static final String SIGNING_NAME = "s3"; + private final S3Configuration serviceConfiguration; private final List clientInterceptors; private final GetObjectRequestMarshaller getObjectRequestMarshaller; private final PutObjectRequestMarshaller putObjectRequestMarshaller; @@ -112,6 +116,8 @@ public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3P private DefaultS3Presigner(Builder b) { super(b); + this.serviceConfiguration = b.serviceConfiguration != null ? 
b.serviceConfiguration : DEFAULT_S3_CONFIGURATION; + this.clientInterceptors = initializeInterceptors(); // Copied from DefaultS3Client#init @@ -236,6 +242,10 @@ public PresignedAbortMultipartUploadRequest presignAbortMultipartUpload(AbortMul .build(); } + protected S3Configuration serviceConfiguration() { + return serviceConfiguration; + } + /** * Generate a {@link PresignedRequest} from a {@link PresignedRequest} and {@link SdkRequest}. */ @@ -289,9 +299,7 @@ private ExecutionContext createExecutionContext(PresignRequest presignRequest, S .putAttribute(SdkExecutionAttribute.CLIENT_TYPE, ClientType.SYNC) .putAttribute(SdkExecutionAttribute.SERVICE_NAME, SERVICE_NAME) .putAttribute(SdkExecutionAttribute.OPERATION_NAME, operationName) - .putAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG, S3Configuration.builder() - .checksumValidationEnabled(false) - .build()) + .putAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG, serviceConfiguration()) .putAttribute(PRESIGNER_EXPIRATION, signatureExpiration); ExecutionInterceptorChain executionInterceptorChain = new ExecutionInterceptorChain(clientInterceptors); @@ -464,9 +472,24 @@ private void initializePresignedRequest(PresignedRequest.Builder presignedReques public static final class Builder extends DefaultSdkPresigner.Builder implements S3Presigner.Builder { + private S3Configuration serviceConfiguration; + private Builder() { } + /** + * Allows providing a custom S3 serviceConfiguration by providing a {@link S3Configuration} object; + * + * Note: chunkedEncodingEnabled and checksumValidationEnabled do not apply to presigned requests. 
+ * + * @param serviceConfiguration {@link S3Configuration} + * @return this Builder + */ + public Builder serviceConfiguration(S3Configuration serviceConfiguration) { + this.serviceConfiguration = serviceConfiguration; + return this; + } + @Override public S3Presigner build() { return new DefaultS3Presigner(this); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java index be2ddd2b1f42..5f53677a1f82 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/resource/S3AccessPointBuilder.java @@ -21,6 +21,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; /** * This class is used to construct an endpoint host for an S3 access point. @@ -102,7 +103,11 @@ public URI toUri() { String dualStackSegment = Boolean.TRUE.equals(dualstackEnabled) ? 
".dualstack" : ""; String uriString = String.format("%s://%s-%s.s3-accesspoint%s.%s.%s", protocol, urlEncode(accessPointName), accountId, dualStackSegment, region, domain); - return URI.create(uriString); + URI uri = URI.create(uriString); + if (uri.getHost() == null) { + throw SdkClientException.create("ARN region (" + region + ") resulted in an invalid URI:" + uri); + } + return uri; } private static void validateHostnameCompliant(String hostnameComponent, String paramName) { diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java index a2a3510d632c..db462c5f8bec 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java @@ -30,6 +30,7 @@ import software.amazon.awssdk.http.SdkHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain; +import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.internal.presigner.DefaultS3Presigner; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; @@ -512,6 +513,16 @@ default PresignedAbortMultipartUploadRequest presignAbortMultipartUpload( @SdkPublicApi @NotThreadSafe interface Builder extends SdkPresigner.Builder { + /** + * Allows providing a custom S3 serviceConfiguration by providing a {@link S3Configuration} object; + * + * Note: chunkedEncodingEnabled and checksumValidationEnabled do not apply to presigned requests. 
+ * + * @param serviceConfiguration {@link S3Configuration} + * @return this Builder + */ + Builder serviceConfiguration(S3Configuration serviceConfiguration); + @Override Builder region(Region region); diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index 027a0f0d6ea2..e03f78e562a5 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -37,7 +37,7 @@ "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", - "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

GetBucketLifecycle has the following special errors:

  • Error code: EntityTooSmall

    • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

    • 400 Bad Request

  • Error code: InvalidPart

    • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

    • 400 Bad Request

  • Error code: InvalidPartOrder

    • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

    • 400 Bad Request

  • Error code: NoSuchUpload

    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

    • 404 Not Found

The following operations are related to DeleteBucketMetricsConfiguration:

" + "documentation":"

Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value, returned after that part was uploaded.

Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload.

For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

GetBucketLifecycle has the following special errors:

  • Error code: EntityTooSmall

    • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

    • 400 Bad Request

  • Error code: InvalidPart

    • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

    • 400 Bad Request

  • Error code: InvalidPartOrder

    • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

    • 400 Bad Request

  • Error code: NoSuchUpload

    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

    • 404 Not Found

The following operations are related to CompleteMultipartUpload:

" }, "CopyObject":{ "name":"CopyObject", @@ -51,7 +51,7 @@ {"shape":"ObjectNotInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, for copying an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

Amazon S3 transfer acceleration does not support cross-region copies. If you request a cross-region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information about transfer acceleration, see Transfer Acceleration.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since.

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

You can use this operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes.

The source object that you are copying can be encrypted or unencrypted. If the source object is encrypted, it can be encrypted by server-side encryption using AWS managed encryption keys or by using a customer-provided encryption key. When copying an object, you can request that Amazon S3 encrypt the target object by using either the AWS managed encryption keys or by using your own encryption key. You can do this regardless of the form of server-side encryption that was used to encrypt the source, or even if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

Consider the following when using request headers:

  • Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

    • x-amz-copy-source-if-match condition evaluates to true

    • x-amz-copy-source-if-unmodified-since condition evaluates to false

  • Consideration 2 – If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

    • x-amz-copy-source-if-none-match condition evaluates to false

    • x-amz-copy-source-if-modified-since condition evaluates to true

The copy request charge is based on the storage class and Region you specify for the destination object. For pricing information, see Amazon S3 Pricing.

Following are other considerations when using CopyObject:

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. (If the current version is a delete marker, Amazon S3 behaves as if the object was deleted.) To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see .

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side Encryption-Specific Request Headers

To encrypt the target object, you must provide the appropriate encryption-related request headers. The one you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • To encrypt the target object using server-side encryption with an AWS managed encryption key, provide the following request headers, as appropriate.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • To encrypt the target object using server-side encryption with an encryption key that you provide, use the following headers.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

  • If the source object is encrypted using server-side encryption with customer-provided encryption keys, you must use the following headers.

    • x-amz-copy-source-server-side-encryption-customer-algorithm

    • x-amz-copy-source-server-side-encryption-customer-key

    • x-amz-copy-source-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CopyObject:

For more information, see Copying Objects.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.

All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

If the copy is successful, you receive a response with information about the copied object.

If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Metadata

When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.

x-amz-copy-source-if Headers

To only copy an object under certain conditions, such as whether the Etag matches or whether the object was modified before or after a specified date, use the following request parameters:

  • x-amz-copy-source-if-match

  • x-amz-copy-source-if-none-match

  • x-amz-copy-source-if-unmodified-since

  • x-amz-copy-source-if-modified-since

If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

  • x-amz-copy-source-if-match condition evaluates to true

  • x-amz-copy-source-if-unmodified-since condition evaluates to false

If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

  • x-amz-copy-source-if-none-match condition evaluates to false

  • x-amz-copy-source-if-modified-since condition evaluates to true

All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

Encryption

The source object that you are copying can be encrypted or unencrypted. The source object can be encrypted with server-side encryption using AWS managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it.

You can optionally use the appropriate encryption-related headers to request server-side encryption for the target object. You have the option to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form of server-side encryption that was used to encrypt the source object. You can even request encryption if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

Access Control List (ACL)-Specific Request Headers

When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

You can use the CopyObject operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.

If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.

The following operations are related to CopyObject:

For more information, see Copying Objects.

", "alias":"PutObjectCopy" }, "CreateBucket":{ @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyOwnedByYou"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"

Creates a new bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the EU (Ireland) Region. For more information, see How to Select a Region for Your Buckets.

If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual Hosting of Buckets.

When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.

  • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

The following operations are related to CreateBucket:

", + "documentation":"

Creates a new bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see How to Select a Region for Your Buckets.

If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual Hosting of Buckets.

When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.

  • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

The following operations are related to CreateBucket:

", "alias":"PutBucket" }, "CreateMultipartUpload":{ @@ -79,7 +79,7 @@ "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart) and UploadPartCopy) requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CreateMultipartUpload:

", + "documentation":"

This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

For more information about multipart uploads, see Multipart Upload Overview.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart) and UploadPartCopy) requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

For more information, see Protecting Data Using Server-Side Encryption.

Access Permissions

When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:

    x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"

The following operations are related to CreateMultipartUpload:

", "alias":"InitiateMultipartUpload" }, "DeleteBucket":{ @@ -101,7 +101,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketAnalyticsConfigurationRequest"}, - "documentation":"

Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to DeleteBucketAnalyticsConfiguration:

" + "documentation":"

Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to DeleteBucketAnalyticsConfiguration:

" }, "DeleteBucketCors":{ "name":"DeleteBucketCors", @@ -122,7 +122,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketEncryptionRequest"}, - "documentation":"

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" }, "DeleteBucketInventoryConfiguration":{ "name":"DeleteBucketInventoryConfiguration", @@ -231,7 +231,8 @@ "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", "documentation":"

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.

When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

The following operations are related to DeleteObjects:

", - "alias":"DeleteMultipleObjects" + "alias":"DeleteMultipleObjects", + "httpChecksumRequired":true }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", @@ -241,7 +242,7 @@ "responseCode":204 }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeleteBucketMetricsConfiguration:

" + "documentation":"

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

" }, "GetBucketAccelerateConfiguration":{ "name":"GetBucketAccelerateConfiguration", @@ -251,7 +252,7 @@ }, "input":{"shape":"GetBucketAccelerateConfigurationRequest"}, "output":{"shape":"GetBucketAccelerateConfigurationOutput"}, - "documentation":"

This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

Related Resources

" }, "GetBucketAcl":{ "name":"GetBucketAcl", @@ -325,7 +326,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationOutput"}, - "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are still using previous version of the lifecycle configuration, it works. For the earlier API description, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycleConfiguration has the following special error:

  • Error code: NoSuchLifecycleConfiguration

    • Description: The lifecycle configuration does not exist.

    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code Prefix: Client

The following operations are related to DeleteBucketMetricsConfiguration:

" + "documentation":"

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are still using previous version of the lifecycle configuration, it works. For the earlier API description, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycleConfiguration has the following special error:

  • Error code: NoSuchLifecycleConfiguration

    • Description: The lifecycle configuration does not exist.

    • HTTP Status Code: 404 Not Found

    • SOAP Fault Code Prefix: Client

The following operations are related to GetBucketLifecycleConfiguration:

" }, "GetBucketLocation":{ "name":"GetBucketLocation", @@ -692,7 +693,8 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"

Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

You can use one of the following two ways to set a bucket's permissions:

  • Specify the ACL in the request body

  • Specify permissions using request headers

You cannot specify access permission using both the body and the request headers.

Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

Access Permissions

You can set access permissions using one of the following methods:

  • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses.

    x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>lt;/Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

Related Resources

" + "documentation":"

Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

You can use one of the following two ways to set a bucket's permissions:

  • Specify the ACL in the request body

  • Specify permissions using request headers

You cannot specify access permission using both the body and the request headers.

Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

Access Permissions

You can set access permissions using one of the following methods:

  • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses.

    x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    Using email addresses to specify a grantee is only supported in the following AWS Regions:

    • US East (N. Virginia)

    • US West (N. California)

    • US West (Oregon)

    • Asia Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

Related Resources

", + "httpChecksumRequired":true }, "PutBucketAnalyticsConfiguration":{ "name":"PutBucketAnalyticsConfiguration", @@ -711,7 +713,8 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

  • The request's Origin header must match AllowedOrigin elements.

  • The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

  • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

  • The request's Origin header must match AllowedOrigin elements.

  • The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

  • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "httpChecksumRequired":true }, "PutBucketEncryption":{ "name":"PutBucketEncryption", @@ -720,7 +723,8 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "documentation":"

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS).

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

" + "documentation":"

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "httpChecksumRequired":true }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", @@ -729,7 +733,7 @@ "requestUri":"/{Bucket}?inventory" }, "input":{"shape":"PutBucketInventoryConfigurationRequest"}, - "documentation":"

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: InvalidArgument

    • Cause: Invalid Argument

  • HTTP 400 Bad Request Error

    • Code: TooManyConfigurations

    • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

  • HTTP 403 Forbidden Error

    • Code: AccessDenied

    • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket

Related Resources

" + "documentation":"

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

  • HTTP 400 Bad Request Error

    • Code: InvalidArgument

    • Cause: Invalid Argument

  • HTTP 400 Bad Request Error

    • Code: TooManyConfigurations

    • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

  • HTTP 403 Forbidden Error

    • Code: AccessDenied

    • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.

Related Resources

" }, "PutBucketLifecycle":{ "name":"PutBucketLifecycle", @@ -739,8 +743,9 @@ }, "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", - "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

  • s3:DeleteObject

  • s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", - "deprecated":true + "documentation":"

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

  • s3:DeleteObject

  • s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", + "deprecated":true, + "httpChecksumRequired":true }, "PutBucketLifecycleConfiguration":{ "name":"PutBucketLifecycleConfiguration", @@ -749,7 +754,8 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "documentation":"

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

  • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

  • Status whether the rule is in effect.

  • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

  • s3:DeleteObject

  • s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

" + "documentation":"

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

  • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

  • Status whether the rule is in effect.

  • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

  • s3:DeleteObject

  • s3:DeleteObjectVersion

  • s3:PutLifecycleConfiguration

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

", + "httpChecksumRequired":true }, "PutBucketLogging":{ "name":"PutBucketLogging", @@ -759,7 +765,8 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"

Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request.

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

<BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

For more information about server access logging, see Server Access Logging.

For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

The following operations are related to PutBucketLogging:

" + "documentation":"

Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request.

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

<BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

For more information about server access logging, see Server Access Logging.

For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

The following operations are related to PutBucketLogging:

", + "httpChecksumRequired":true }, "PutBucketMetricsConfiguration":{ "name":"PutBucketMetricsConfiguration", @@ -779,7 +786,8 @@ "input":{"shape":"PutBucketNotificationRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", "documentation":"

No longer used, see the PutBucketNotificationConfiguration operation.

", - "deprecated":true + "deprecated":true, + "httpChecksumRequired":true }, "PutBucketNotificationConfiguration":{ "name":"PutBucketNotificationConfiguration", @@ -798,7 +806,8 @@ }, "input":{"shape":"PutBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", - "documentation":"

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

The following operations are related to PutBucketPolicy:

" + "documentation":"

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

The following operations are related to PutBucketPolicy:

", + "httpChecksumRequired":true }, "PutBucketReplication":{ "name":"PutBucketReplication", @@ -807,7 +816,8 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see ReplicationErrorCodeList

The following operations are related to PutBucketReplication:

" + "documentation":"

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see ReplicationErrorCodeList

The following operations are related to PutBucketReplication:

", + "httpChecksumRequired":true }, "PutBucketRequestPayment":{ "name":"PutBucketRequestPayment", @@ -817,7 +827,8 @@ }, "input":{"shape":"PutBucketRequestPaymentRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", - "documentation":"

Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

The following operations are related to PutBucketRequestPayment:

" + "documentation":"

Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

The following operations are related to PutBucketRequestPayment:

", + "httpChecksumRequired":true }, "PutBucketTagging":{ "name":"PutBucketTagging", @@ -827,7 +838,8 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional operation is currently in progress against this resource. Please try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

The following operations are related to PutBucketTagging:

" + "documentation":"

Sets the tags for a bucket.

Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors:

  • Error code: InvalidTagError

  • Error code: MalformedXMLError

    • Description: The XML provided does not match the schema.

  • Error code: OperationAbortedError

    • Description: A conflicting conditional operation is currently in progress against this resource. Please try again.

  • Error code: InternalError

    • Description: The service was unable to apply the provided tag to the bucket.

The following operations are related to PutBucketTagging:

", + "httpChecksumRequired":true }, "PutBucketVersioning":{ "name":"PutBucketVersioning", @@ -837,7 +849,8 @@ }, "input":{"shape":"PutBucketVersioningRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", - "documentation":"

Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

You can set the versioning state with one of the following values:

Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

Related Resources

" + "documentation":"

Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

You can set the versioning state with one of the following values:

Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

Related Resources

", + "httpChecksumRequired":true }, "PutBucketWebsite":{ "name":"PutBucketWebsite", @@ -847,7 +860,8 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.

  • WebsiteConfiguration

  • RedirectAllRequestsTo

  • HostName

  • Protocol

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

  • WebsiteConfiguration

  • IndexDocument

  • Suffix

  • ErrorDocument

  • Key

  • RoutingRules

  • RoutingRule

  • Condition

  • HttpErrorCodeReturnedEquals

  • KeyPrefixEquals

  • Redirect

  • Protocol

  • HostName

  • ReplaceKeyPrefixWith

  • ReplaceKeyWith

  • HttpRedirectCode

" + "documentation":"

Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.

  • WebsiteConfiguration

  • RedirectAllRequestsTo

  • HostName

  • Protocol

If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

  • WebsiteConfiguration

  • IndexDocument

  • Suffix

  • ErrorDocument

  • Key

  • RoutingRules

  • RoutingRule

  • Condition

  • HttpErrorCodeReturnedEquals

  • KeyPrefixEquals

  • Redirect

  • Protocol

  • HostName

  • ReplaceKeyPrefixWith

  • ReplaceKeyWith

  • HttpRedirectCode

Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.

", + "httpChecksumRequired":true }, "PutObject":{ "name":"PutObject", @@ -858,7 +872,7 @@ "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

To configure your application to send the request headers before sending the request body, use the 100-continue HTTP status code. For PUT operations, this helps you avoid sending the message body if the message is rejected based on the headers (for example, because authentication fails or a redirect occurs). For more information on the 100-continue HTTP status code, see Section 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt.

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side Encryption.

Access Permissions

You can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

  • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS.

Access-Control-List (ACL)-Specific Request Headers

You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

  • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

  • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

    • x-amz-grant-read

    • x-amz-grant-write

    • x-amz-grant-read-acp

    • x-amz-grant-write-acp

    • x-amz-grant-full-control

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • EU (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

Server-Side-Encryption-Specific Request Headers

You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

  • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

    • x-amz-server-side-encryption

    • x-amz-server-side-encryption-aws-kms-key-id

    • x-amz-server-side-encryption-context

    If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id of the symmetric customer managed CMK. Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

    All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

  • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

    If you use this feature, the ETag value that Amazon S3 returns in the response is not the MD5 of the object.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

Storage Class Options

By default, Amazon S3 uses the Standard storage class to store newly created objects. The Standard storage class provides high durability and high availability. You can specify other storage classes depending on the performance needs. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response using the x-amz-version-id response header. If versioning is suspended, Amazon S3 always uses null as the version ID for the object stored. For more information about returning the versioning state of a bucket, see GetBucketVersioning. If you enable versioning for a bucket, when Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

Related Resources

" + "documentation":"

Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.

Server-side Encryption

You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side Encryption.

Access Control List (ACL)-Specific Request Headers

You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.

Storage Class Options

By default, Amazon S3 uses the STANDARD storage class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different storage class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

Versioning

If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.

Related Resources

" }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -872,7 +886,8 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket. You must have WRITE_ACP permission to set the ACL of an object.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach.

Access Permissions

You can set access permissions using one of the following methods:

  • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • emailAddress – if the value specified is the email address of an AWS account

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request.

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources

" + "documentation":"

Uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket. You must have WRITE_ACP permission to set the ACL of an object.

Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.

Access Permissions

You can set access permissions using one of the following methods:

  • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

  • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You specify each grantee as a type=value pair, where the type is one of the following:

    • id – if the value specified is the canonical user ID of an AWS account

    • uri – if you are granting permissions to a predefined group

    • emailAddress – if the value specified is the email address of an AWS account

      Using email addresses to specify a grantee is only supported in the following AWS Regions:

      • US East (N. Virginia)

      • US West (N. California)

      • US West (Oregon)

      • Asia Pacific (Singapore)

      • Asia Pacific (Sydney)

      • Asia Pacific (Tokyo)

      • Europe (Ireland)

      • South America (São Paulo)

      For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

    For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.

    x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Grantee Values

You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

  • By the person's ID:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

    DisplayName is optional and ignored in the request.

  • By URI:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

  • By Email address:

    <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

    The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    Using email addresses to specify a grantee is only supported in the following AWS Regions:

    • US East (N. Virginia)

    • US West (N. California)

    • US West (Oregon)

    • Asia Pacific (Singapore)

    • Asia Pacific (Sydney)

    • Asia Pacific (Tokyo)

    • Europe (Ireland)

    • South America (São Paulo)

    For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

Versioning

The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

Related Resources

", + "httpChecksumRequired":true }, "PutObjectLegalHold":{ "name":"PutObjectLegalHold", @@ -882,7 +897,8 @@ }, "input":{"shape":"PutObjectLegalHoldRequest"}, "output":{"shape":"PutObjectLegalHoldOutput"}, - "documentation":"

Applies a Legal Hold configuration to the specified object.

Related Resources

" + "documentation":"

Applies a Legal Hold configuration to the specified object.

Related Resources

", + "httpChecksumRequired":true }, "PutObjectLockConfiguration":{ "name":"PutObjectLockConfiguration", @@ -892,7 +908,8 @@ }, "input":{"shape":"PutObjectLockConfigurationRequest"}, "output":{"shape":"PutObjectLockConfigurationOutput"}, - "documentation":"

Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.

DefaultRetention requires either Days or Years. You can't specify both at the same time.

Related Resources

" + "documentation":"

Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.

DefaultRetention requires either Days or Years. You can't specify both at the same time.

Related Resources

", + "httpChecksumRequired":true }, "PutObjectRetention":{ "name":"PutObjectRetention", @@ -902,7 +919,8 @@ }, "input":{"shape":"PutObjectRetentionRequest"}, "output":{"shape":"PutObjectRetentionOutput"}, - "documentation":"

Places an Object Retention configuration on an object.

Related Resources

" + "documentation":"

Places an Object Retention configuration on an object.

Related Resources

", + "httpChecksumRequired":true }, "PutObjectTagging":{ "name":"PutObjectTagging", @@ -912,7 +930,8 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

    • Code: InvalidTagError

    • Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.

    • Code: MalformedXMLError

    • Cause: The XML provided does not match the schema.

    • Code: OperationAbortedError

    • Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.

    • Code: InternalError

    • Cause: The service was unable to apply the provided tag to the object.

Related Resources

" + "documentation":"

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

    • Code: InvalidTagError

    • Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.

    • Code: MalformedXMLError

    • Cause: The XML provided does not match the schema.

    • Code: OperationAbortedError

    • Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.

    • Code: InternalError

    • Cause: The service was unable to apply the provided tag to the object.

Related Resources

", + "httpChecksumRequired":true }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", @@ -921,7 +940,8 @@ "requestUri":"/{Bucket}?publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "documentation":"

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

Related Resources

" + "documentation":"

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

Related Resources

", + "httpChecksumRequired":true }, "RestoreObject":{ "name":"RestoreObject", @@ -935,7 +955,7 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation":"

Restores an archived copy of an object back into Amazon S3

This operation performs the following types of requests:

  • select - Perform a select query on an archived object

  • restore an archive - Restore an archived object

To use this operation, you must have permissions to perform the s3:RestoreObject and s3:GetObject actions. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

  • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about the S3 structure in the request body, see the following:

  • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

For more information about using SQL with Glacier Select restore, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

  • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

  • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

The following are additional important facts about the select feature:

  • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted—manually or through a lifecycle policy.

  • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

  • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

Restoring Archives

Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class that object was restored from.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the GLACIER storage class when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals are typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for the DEEP_ARCHIVE storage class.

  • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. Standard retrievals typically complete within 3-5 hours from the GLACIER storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.

  • Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval option, enabling you to retrieve large amounts, even petabytes, of data inexpensively in a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER storage class and typically complete within 48 hours from the DEEP_ARCHIVE storage class.

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

  • If the object copy is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object copy is previously restored, Amazon S3 returns 200 OK in the response.

Special Errors

    • Code: RestoreAlreadyInProgress

    • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

    • HTTP Status Code: 409 Conflict

    • SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

    • Cause: Glacier expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to Standard or Bulk retrievals.)

    • HTTP Status Code: 503

    • SOAP Fault Code Prefix: N/A

Related Resources

", + "documentation":"

Restores an archived copy of an object back into Amazon S3.

This operation performs the following types of requests:

  • select - Perform a select query on an archived object

  • restore an archive - Restore an archived object

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

  • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about the S3 structure in the request body, see the following:

  • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

    • The following expression returns all records from the specified object.

      SELECT * FROM Object

    • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

      SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

    • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

      SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

  • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

  • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

The following are additional important facts about the select feature:

  • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted — manually or through a lifecycle policy.

  • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

  • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

Restoring Archives

Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class that object was restored from.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

  • Expedited - Expedited retrievals allow you to quickly access your data stored in the GLACIER storage class when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals are typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for the DEEP_ARCHIVE storage class.

  • Standard - S3 Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. S3 Standard retrievals typically complete within 3-5 hours from the GLACIER storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.

  • Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval option, enabling you to retrieve large amounts, even petabytes, of data inexpensively in a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER storage class and typically complete within 48 hours from the DEEP_ARCHIVE storage class.

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request — there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

  • If the object copy is not previously restored, then Amazon S3 returns 202 Accepted in the response.

  • If the object copy is previously restored, Amazon S3 returns 200 OK in the response.

Special Errors

    • Code: RestoreAlreadyInProgress

    • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

    • HTTP Status Code: 409 Conflict

    • SOAP Fault Code Prefix: Client

    • Code: GlacierExpeditedRetrievalNotAvailable

    • Cause: S3 Glacier expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

    • HTTP Status Code: 503

    • SOAP Fault Code Prefix: N/A

Related Resources

", "alias":"PostObjectRestore" }, "SelectObjectContent":{ @@ -950,7 +970,7 @@ "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "output":{"shape":"SelectObjectContentOutput"}, - "documentation":"

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

  • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

  • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

  • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

  • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

    For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.

    For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

Working with the Response Body

Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see RESTSelectObjectAppendix .

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

  • Range: While you can specify a scan range for a Amazon S3 Select request, see SelectObjectContentRequest$ScanRange in the request parameters below, you cannot specify the range of bytes of an object to return.

  • GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information, about storage classes see Storage Classes in the Amazon Simple Storage Service Developer Guide.

Special Errors

For a list of special errors for this operation and for general information about Amazon S3 errors and a list of error codes, see ErrorResponses

Related Resources

" + "documentation":"

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

  • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

  • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

  • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

  • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

    For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.

    For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

Working with the Response Body

Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see RESTSelectObjectAppendix.

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

  • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest$ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

  • GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

Special Errors

For a list of special errors for this operation, see SelectObjectContentErrorCodeList

Related Resources

" }, "UploadPart":{ "name":"UploadPart", @@ -1174,7 +1194,7 @@ }, "BucketAccountId":{ "shape":"AccountId", - "documentation":"

The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.

" + "documentation":"

The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.

Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.

" }, "Bucket":{ "shape":"BucketName", @@ -2732,7 +2752,7 @@ }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

The storage class to use when replicating objects, such as standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

" + "documentation":"

The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

" }, "AccessControlTranslation":{ "shape":"AccessControlTranslation", @@ -3141,7 +3161,7 @@ "members":{ "LocationConstraint":{ "shape":"BucketLocationConstraint", - "documentation":"

Specifies the Region where the bucket resides. For a list of all the Amazon S3 supported location constraints by Region, see Regions and Endpoints.

" + "documentation":"

Specifies the Region where the bucket resides. For a list of all the Amazon S3 supported location constraints by Region, see Regions and Endpoints. Buckets in Region us-east-1 have a LocationConstraint of null.

" } } }, @@ -3212,7 +3232,7 @@ "members":{ "Bucket":{ "shape":"BucketName", - "documentation":"

Name of the bucket for which to get the notification configuration

", + "documentation":"

Name of the bucket for which to get the notification configuration.

", "location":"uri", "locationName":"Bucket" } @@ -3359,11 +3379,11 @@ }, "IndexDocument":{ "shape":"IndexDocument", - "documentation":"

The name of the index document for the website.

" + "documentation":"

The name of the index document for the website (for example index.html).

" }, "ErrorDocument":{ "shape":"ErrorDocument", - "documentation":"

The name of the error document for the website.

" + "documentation":"

The object key name of the website error document to use for 4XX class errors.

" }, "RoutingRules":{ "shape":"RoutingRules", @@ -3640,7 +3660,7 @@ }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.

", + "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

", "location":"header", "locationName":"x-amz-storage-class" }, @@ -3733,7 +3753,7 @@ }, "Range":{ "shape":"Range", - "documentation":"

Downloads the specified range bytes of an object. For more information about the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

", + "documentation":"

Downloads the specified range bytes of an object. For more information about the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

Amazon S3 doesn't support retrieving multiple ranges of data per GET request.

", "location":"header", "locationName":"Range" }, @@ -3965,10 +3985,10 @@ "members":{ "Tier":{ "shape":"Tier", - "documentation":"

Glacier retrieval tier at which the restore will be processed.

" + "documentation":"

S3 Glacier retrieval tier at which the restore will be processed.

" } }, - "documentation":"

Container for Glacier job parameters.

" + "documentation":"

Container for S3 Glacier job parameters.

" }, "Grant":{ "type":"structure", @@ -3999,7 +4019,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

Email address of the grantee.

" + "documentation":"

Email address of the grantee.

Using email addresses to specify a grantee is only supported in the following AWS Regions:

  • US East (N. Virginia)

  • US West (N. California)

  • US West (Oregon)

  • Asia Pacific (Singapore)

  • Asia Pacific (Sydney)

  • Asia Pacific (Tokyo)

  • Europe (Ireland)

  • South America (São Paulo)

For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.

" }, "ID":{ "shape":"ID", @@ -4172,7 +4192,7 @@ }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.

For more information, see Storage Classes.

", + "documentation":"

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

For more information, see Storage Classes.

", "location":"header", "locationName":"x-amz-storage-class" }, @@ -4258,7 +4278,7 @@ }, "Range":{ "shape":"Range", - "documentation":"

Downloads the specified range bytes of an object. For more information about the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

", + "documentation":"

Downloads the specified range bytes of an object. For more information about the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

Amazon S3 doesn't support retrieving multiple ranges of data per GET request.

", "location":"header", "locationName":"Range" }, @@ -4494,7 +4514,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The ID of the account that owns the destination bucket.

" + "documentation":"

The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.

Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.

" }, "Bucket":{ "shape":"BucketName", @@ -4545,7 +4565,7 @@ "members":{ "RecordDelimiter":{ "shape":"RecordDelimiter", - "documentation":"

The value used to separate individual records in the output.

" + "documentation":"

The value used to separate individual records in the output. If no value is specified, Amazon S3 uses a newline character ('\\n').

" } }, "documentation":"

Specifies JSON as request's output serialization format.

" @@ -5015,7 +5035,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

", "location":"querystring", "locationName":"max-keys" }, @@ -5107,7 +5127,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -5150,7 +5170,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

" + "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

" }, "CommonPrefixes":{ "shape":"CommonPrefixList", @@ -5202,7 +5222,7 @@ }, "MaxKeys":{ "shape":"MaxKeys", - "documentation":"

Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

", + "documentation":"

Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.

", "location":"querystring", "locationName":"max-keys" }, @@ -6092,6 +6112,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6172,13 +6194,15 @@ }, "CORSConfiguration":{ "shape":"CORSConfiguration", - "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

", + "documentation":"

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

", "locationName":"CORSConfiguration", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} }, "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" } @@ -6201,6 +6225,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. This parameter is auto-populated when using the command from the CLI.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6273,6 +6299,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6307,6 +6335,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash of the PutBucketLogging request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" } @@ -6379,6 +6409,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6407,6 +6439,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash of the request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6439,6 +6473,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6472,6 +6508,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6500,6 +6538,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6528,6 +6568,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6562,6 +6604,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6612,6 +6656,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -6714,6 +6760,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash for the request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" } @@ -6760,6 +6808,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash for the request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" } @@ -6940,7 +6990,7 @@ }, "StorageClass":{ "shape":"StorageClass", - "documentation":"

If you don't specify, Standard is the default storage class. Amazon S3 supports other storage classes.

", + "documentation":"

If you don't specify, S3 Standard is the default storage class. Amazon S3 supports other storage classes.

", "location":"header", "locationName":"x-amz-storage-class" }, @@ -7067,6 +7117,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash for the request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" } @@ -7113,6 +7165,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash for the request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -7141,6 +7195,8 @@ "ContentMD5":{ "shape":"ContentMD5", "documentation":"

The MD5 hash of the PutPublicAccessBlock request body.

", + "deprecated":true, + "deprecatedMessage":"Content-MD5 header will now be automatically computed and injected in associated operation's Http request.", "location":"header", "locationName":"Content-MD5" }, @@ -7521,7 +7577,7 @@ }, "GlacierJobParameters":{ "shape":"GlacierJobParameters", - "documentation":"

Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

" + "documentation":"

S3 Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

" }, "Type":{ "shape":"RestoreRequestType", @@ -7529,7 +7585,7 @@ }, "Tier":{ "shape":"Tier", - "documentation":"

Glacier retrieval tier at which the restore will be processed.

" + "documentation":"

S3 Glacier retrieval tier at which the restore will be processed.

" }, "Description":{ "shape":"Description", @@ -7598,13 +7654,13 @@ }, "Transition":{ "shape":"Transition", - "documentation":"

Specifies when an object transitions to a specified storage class.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" }, "NoncurrentVersionTransition":{"shape":"NoncurrentVersionTransition"}, "NoncurrentVersionExpiration":{"shape":"NoncurrentVersionExpiration"}, "AbortIncompleteMultipartUpload":{"shape":"AbortIncompleteMultipartUpload"} }, - "documentation":"

Specifies lifecycle rules for an Amazon S3 bucket. For more information, see PUT Bucket lifecycle in the Amazon Simple Storage Service API Reference.

" + "documentation":"

Specifies lifecycle rules for an Amazon S3 bucket. For more information, see Put Bucket Lifecycle Configuration in the Amazon Simple Storage Service API Reference. For examples, see Put Bucket Lifecycle Configuration Examples

" }, "Rules":{ "type":"list", @@ -7857,7 +7913,7 @@ }, "KMSMasterKeyID":{ "shape":"SSEKMSKeyId", - "documentation":"

KMS master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.

" + "documentation":"

AWS Key Management Service (KMS) customer master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.

You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are using encryption with cross-account operations, you must use a fully qualified CMK ARN. For more information, see Using encryption for cross-account operations.

For example:

  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more information, see Using Symmetric and Asymmetric Keys in the AWS Key Management Service Developer Guide.

" } }, "documentation":"

Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. For more information, see PUT Bucket encryption in the Amazon Simple Storage Service API Reference.

" @@ -8138,7 +8194,7 @@ "documentation":"

The storage class to which you want the object to transition.

" } }, - "documentation":"

Specifies when an object transitions to a specified storage class.

" + "documentation":"

Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.

" }, "TransitionList":{ "type":"list", diff --git a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors index 535869feb1e0..91ecfda62673 100644 --- a/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors +++ b/services/s3/src/main/resources/software/amazon/awssdk/services/s3/execution.interceptors @@ -11,3 +11,4 @@ software.amazon.awssdk.services.s3.internal.handlers.AsyncChecksumValidationInte software.amazon.awssdk.services.s3.internal.handlers.SyncChecksumValidationInterceptor software.amazon.awssdk.services.s3.internal.handlers.EnableTrailingChecksumInterceptor software.amazon.awssdk.services.s3.internal.handlers.ExceptionTranslationInterceptor +software.amazon.awssdk.services.s3.internal.handlers.GetObjectInterceptor \ No newline at end of file diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java new file mode 100644 index 000000000000..82cc321f2bd6 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/InvalidRegionTest.java @@ -0,0 +1,89 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.time.Duration; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; + +public class InvalidRegionTest { + @Test + public void invalidS3UtilitiesRegionAtClientGivesHelpfulMessage() { + S3Utilities utilities = S3Utilities.builder().region(Region.of("US_EAST_1")).build(); + + assertThatThrownBy(() -> utilities.getUrl(r -> r.bucket("foo").key("bar"))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region") + .hasMessageContaining("us-east-1"); + } + + @Test + public void invalidS3UtilitiesRegionAtRequestGivesHelpfulMessage() { + S3Utilities utilities = S3Utilities.builder().region(Region.of("us-east-1")).build(); + + assertThatThrownBy(() -> utilities.getUrl(r -> r.bucket("foo").key("bar").region(Region.of("US_WEST_2")))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_WEST_2") + .hasMessageContaining("region") + .hasMessageContaining("us-west-2"); + } + + @Test + public void invalidS3ArnRegionAtRequestGivesHelpfulMessage() { + S3Client client = S3Client.builder() + .region(Region.of("us-east-1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .serviceConfiguration(c -> c.useArnRegionEnabled(true)) + .build(); + + assertThatThrownBy(() -> client.getObject(r -> r.bucket("arn:aws:s3:US_EAST_1:123456789012:accesspoint/test") + .key("test"))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region"); + } + + @Test + public void invalidS3PresignerRegionAtClientGivesHelpfulMessage() { + assertThatThrownBy(() -> 
S3Presigner.builder().region(Region.of("US_EAST_1")).build()) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region") + .hasMessageContaining("us-east-1"); + } + + @Test + public void invalidS3PresignerArnRegionAtRequestGivesHelpfulMessage() { + S3Presigner presigner = S3Presigner.builder() + .region(Region.of("us-east-1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .serviceConfiguration(S3Configuration.builder().useArnRegionEnabled(true).build()) + .build(); + + String arn = "arn:aws:s3:US_EAST_1:123456789012:accesspoint/test"; + assertThatThrownBy(() -> presigner.presignGetObject(r -> r.getObjectRequest(g -> g.bucket(arn).key("test")) + .signatureDuration(Duration.ofMinutes(15)))) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region"); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java index 5cf3f215c607..7ac86abc0170 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java @@ -30,7 +30,6 @@ import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; -import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.model.RequestPayer; @@ -41,6 +40,7 @@ @RunWith(MockitoJUnitRunner.class) public class S3PresignerTest { private static final URI FAKE_URL; + private static final String BUCKET = "some-bucket"; private S3Presigner presigner; @@ -64,8 +64,12 @@ private S3Presigner.Builder 
presignerBuilder() { .credentialsProvider(() -> AwsBasicCredentials.create("x", "x")); } + private S3Presigner generateMaximal() { return S3Presigner.builder() + .serviceConfiguration(S3Configuration.builder() + .checksumValidationEnabled(false) + .build()) .credentialsProvider(() -> AwsBasicCredentials.create("x", "x")) .region(Region.US_EAST_1) .endpointOverride(FAKE_URL) @@ -325,4 +329,128 @@ public void putObject_Sigv4PresignerHonorsSignatureDuration() { assertThat(Integer.parseInt(expires)).isCloseTo(1234, Offset.offset(2)); }); } + + @Test + public void getObject_S3ConfigurationCanBeOverriddenToLeverageTransferAcceleration() { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("foo34343434") + .key("bar"))); + + + System.out.println(presignedRequest.url()); + + assertThat(presignedRequest.httpRequest().host()).contains(".s3-accelerate."); + } + + + @Test + public void accelerateEnabled_UsesVirtualAddressingWithAccelerateEndpoint() { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo(String.format("%s.s3-accelerate.amazonaws.com", BUCKET)); + } + + /** + * Dualstack uses regional endpoints that support virtual addressing. 
+ */ + @Test + public void dualstackEnabled_UsesVirtualAddressingWithDualstackEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).contains(String.format("%s.s3.dualstack.us-west-2.amazonaws.com", BUCKET)); + } + + /** + * Dualstack also supports path style endpoints just like the normal endpoints. + */ + @Test + public void dualstackAndPathStyleEnabled_UsesPathStyleAddressingWithDualstackEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .pathStyleAccessEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo("s3.dualstack.us-west-2.amazonaws.com"); + assertThat(presignedRequest.url().toString()).startsWith(String.format("https://s3.dualstack.us-west-2.amazonaws.com/%s/%s?", BUCKET, "bar")); + } + + /** + * When dualstack and accelerate are both enabled there is a special, global dualstack endpoint we must use. 
+ */ + @Test + public void dualstackAndAccelerateEnabled_UsesDualstackAccelerateEndpoint() throws Exception { + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .dualstackEnabled(true) + .accelerateModeEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(BUCKET) + .key("bar"))); + + assertThat(presignedRequest.httpRequest().host()).isEqualTo(String.format("%s.s3-accelerate.dualstack.amazonaws.com", BUCKET)); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionTrue() throws Exception { + String customEndpoint = "https://foobar-12345678910.s3-accesspoint.us-west-2.amazonaws.com"; + String accessPointArn = "arn:aws:s3:us-west-2:12345678910:accesspoint:foobar"; + + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .useArnRegionEnabled(true) + .build()) + .build(); + + PresignedGetObjectRequest presignedRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(accessPointArn) + .key("bar"))); + + assertThat(presignedRequest.url().toString()).startsWith(customEndpoint); + } + + @Test + public void accessPointArn_differentRegion_useArnRegionFalse_throwsIllegalArgumentException() throws Exception { + String accessPointArn = "arn:aws:s3:us-east-1:12345678910:accesspoint:foobar"; + + S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() + .useArnRegionEnabled(false) + .build()) + .build(); + + assertThatThrownBy(() -> presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket(accessPointArn).key("bar")))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("region"); + } } \ No newline at end of file diff --git a/services/s3control/pom.xml 
b/services/s3control/pom.xml index 900e3ea4a81f..5eb2a4e02e66 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json index b235e6e3618d..d37fbb8606a4 100644 --- a/services/s3control/src/main/resources/codegen-resources/service-2.json +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -42,7 +42,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates an Amazon S3 batch operations job.

" + "documentation":"

You can use Amazon S3 Batch Operations to perform large-scale Batch Operations on Amazon S3 objects. Amazon S3 Batch Operations can execute a single operation or action on lists of Amazon S3 objects that you specify. For more information, see Amazon S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "DeleteAccessPoint":{ "name":"DeleteAccessPoint", @@ -75,7 +75,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Delete the tags on a Amazon S3 batch operations job, if any.

" + "documentation":"

Removes the entire tag set from the specified Amazon S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Using Job Tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", @@ -100,7 +100,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the configuration parameters and status for a batch operations job.

" + "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see Amazon S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "GetAccessPoint":{ "name":"GetAccessPoint", @@ -145,7 +145,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieve the tags on a Amazon S3 batch operations job.

" + "documentation":"

Returns the tags on an Amazon S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Using Job Tags in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "GetPublicAccessBlock":{ "name":"GetPublicAccessBlock", @@ -168,7 +168,7 @@ }, "input":{"shape":"ListAccessPointsRequest"}, "output":{"shape":"ListAccessPointsResult"}, - "documentation":"

Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1000 access points per call. If the specified bucket has more than 1000 access points (or the number specified in maxResults, whichever is less), then the response will include a continuation token that you can use to list the additional access points.

" + "documentation":"

Returns a list of the access points currently associated with the specified bucket. You can retrieve up to 1000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

" }, "ListJobs":{ "name":"ListJobs", @@ -183,7 +183,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists current jobs and jobs that have ended within the last 30 days for the AWS account making the request.

" + "documentation":"

Lists current Amazon S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see Amazon S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "PutAccessPointPolicy":{ "name":"PutAccessPointPolicy", @@ -216,7 +216,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Replace the set of tags on a Amazon S3 batch operations job.

" + "documentation":"

Set the supplied tag-set on an Amazon S3 Batch Operations job.

A tag is a key-value pair. You can associate Amazon S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modifying that tag set, and using this API action to replace the tag set with the one you have modified. For more information, see Using Job Tags in the Amazon Simple Storage Service Developer Guide.

  • If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you will be charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.

  • To delete existing tags for your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.

  • A few things to consider about using tags:

    • Amazon S3 limits the maximum number of tags to 50 tags per job.

    • You can associate up to 50 tags with a job as long as they have unique tag keys.

    • A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.

    • The key and values are case sensitive.

    • For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions.

To use this operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

" }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", @@ -241,7 +241,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates an existing job's priority.

" + "documentation":"

Updates an existing Amazon S3 Batch Operations job's priority. For more information, see Amazon S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" }, "UpdateJobStatus":{ "name":"UpdateJobStatus", @@ -258,7 +258,7 @@ {"shape":"JobStatusException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job.

" + "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see Amazon S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.

Related actions include:

" } }, "shapes":{ @@ -276,11 +276,11 @@ }, "NetworkOrigin":{ "shape":"NetworkOrigin", - "documentation":"

Indicates whether this access point allows access from the public Internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public Internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public Internet, subject to the access point and bucket access policies.

" + "documentation":"

Indicates whether this access point allows access from the public internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public internet, subject to the access point and bucket access policies.

" }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"

The Virtual Private Cloud (VPC) configuration for this access point, if one exists.

" + "documentation":"

The virtual private cloud (VPC) configuration for this access point, if one exists.

" }, "Bucket":{ "shape":"BucketName", @@ -303,7 +303,8 @@ }, "AccountId":{ "type":"string", - "max":64 + "max":64, + "pattern":"^\\d{12}$" }, "BadRequestException":{ "type":"structure", @@ -346,7 +347,7 @@ }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"

If you include this field, Amazon S3 restricts access to this access point to requests from the specified Virtual Private Cloud (VPC).

" + "documentation":"

If you include this field, Amazon S3 restricts access to this access point to requests from the specified virtual private cloud (VPC).

" }, "PublicAccessBlockConfiguration":{"shape":"PublicAccessBlockConfiguration"} } @@ -402,11 +403,11 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) Role that batch operations will use to execute this job's operation on each object in the manifest.

" + "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to execute this job's operation on each object in the manifest.

" }, "Tags":{ "shape":"S3TagSet", - "documentation":"

An optional set of tags to associate with the job when it is created.

" + "documentation":"

A set of tags to associate with the Amazon S3 Batch Operations job. This is an optional parameter.

" } } }, @@ -471,13 +472,13 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to remove tags from.

", + "documentation":"

The AWS account ID associated with the Amazon S3 Batch Operations job.

", "location":"header", "locationName":"x-amz-account-id" }, "JobId":{ "shape":"JobId", - "documentation":"

The ID for the job whose tags you want to delete.

", + "documentation":"

The ID for the Amazon S3 Batch Operations job whose tags you want to delete.

", "location":"uri", "locationName":"id" } @@ -535,6 +536,12 @@ "max":1024, "min":1 }, + "FunctionArnString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, "GetAccessPointPolicyRequest":{ "type":"structure", "required":[ @@ -629,11 +636,11 @@ }, "NetworkOrigin":{ "shape":"NetworkOrigin", - "documentation":"

Indicates whether this access point allows access from the public Internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public Internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public Internet, subject to the access point and bucket access policies.

" + "documentation":"

Indicates whether this access point allows access from the public internet. If VpcConfiguration is specified for this access point, then NetworkOrigin is VPC, and the access point doesn't allow access from the public internet. Otherwise, NetworkOrigin is Internet, and the access point allows access from the public internet, subject to the access point and bucket access policies.

" }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"

Contains the Virtual Private Cloud (VPC) configuration for the specified access point.

" + "documentation":"

Contains the virtual private cloud (VPC) configuration for the specified access point.

" }, "PublicAccessBlockConfiguration":{"shape":"PublicAccessBlockConfiguration"}, "CreationDate":{ @@ -651,13 +658,13 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to retrieve tags for.

", + "documentation":"

The AWS account ID associated with the Amazon S3 Batch Operations job.

", "location":"header", "locationName":"x-amz-account-id" }, "JobId":{ "shape":"JobId", - "documentation":"

The ID for the job whose tags you want to retrieve.

", + "documentation":"

The ID for the Amazon S3 Batch Operations job whose tags you want to retrieve.

", "location":"uri", "locationName":"id" } @@ -668,7 +675,7 @@ "members":{ "Tags":{ "shape":"S3TagSet", - "documentation":"

The set of tags associated with the job.

" + "documentation":"

The set of tags associated with the Amazon S3 Batch Operations job.

" } } }, @@ -697,7 +704,8 @@ "IAMRoleArn":{ "type":"string", "max":2048, - "min":1 + "min":1, + "pattern":"arn:[^:]+:iam::\\d{12}:role/.*" }, "IdempotencyException":{ "type":"structure", @@ -736,7 +744,8 @@ "JobArn":{ "type":"string", "max":1024, - "min":1 + "min":1, + "pattern":"arn:[^:]+:s3:[a-zA-Z0-9\\-]+:\\d{12}:job\\/.*" }, "JobCreationTime":{"type":"timestamp"}, "JobDescriptor":{ @@ -810,7 +819,7 @@ }, "RoleArn":{ "shape":"IAMRoleArn", - "documentation":"

The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) Role assigned to execute the tasks for this job.

", + "documentation":"

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role assigned to execute the tasks for this job.

", "box":true }, "SuspendedDate":{ @@ -857,7 +866,8 @@ "JobId":{ "type":"string", "max":36, - "min":5 + "min":5, + "pattern":"[a-zA-Z0-9\\-\\_]+" }, "JobListDescriptor":{ "type":"structure", @@ -1013,6 +1023,14 @@ "shape":"S3InitiateRestoreObjectOperation", "documentation":"

Directs the specified job to execute an Initiate Glacier Restore call on each object in the manifest.

", "box":true + }, + "S3PutObjectLegalHold":{ + "shape":"S3SetObjectLegalHoldOperation", + "box":true + }, + "S3PutObjectRetention":{ + "shape":"S3SetObjectRetentionOperation", + "box":true } }, "documentation":"

The operation that you want this job to perform on each object listed in the manifest. For more information about the available operations, see Available Operations in the Amazon Simple Storage Service Developer Guide.

" @@ -1134,7 +1152,7 @@ "type":"structure", "members":{ "FunctionArn":{ - "shape":"NonEmptyMaxLength1024String", + "shape":"FunctionArnString", "documentation":"

The Amazon Resource Name (ARN) for the AWS Lambda function that the specified job will invoke for each object in the manifest.

" } }, @@ -1200,7 +1218,7 @@ "locationName":"jobStatuses" }, "NextToken":{ - "shape":"NonEmptyMaxLength1024String", + "shape":"StringForNextToken", "documentation":"

A pagination token to request the next page of results. Use the token that Amazon S3 returned in the NextToken element of the ListJobsResult from the previous List Jobs request.

", "location":"querystring", "locationName":"nextToken" @@ -1218,7 +1236,7 @@ "type":"structure", "members":{ "NextToken":{ - "shape":"NonEmptyMaxLength1024String", + "shape":"StringForNextToken", "documentation":"

If the List Jobs request produced more than the maximum number of results, you can pass this value into a subsequent List Jobs request in order to retrieve the next page of results.

" }, "Jobs":{ @@ -1288,7 +1306,9 @@ "S3PutObjectCopy", "S3PutObjectAcl", "S3PutObjectTagging", - "S3InitiateRestoreObject" + "S3InitiateRestoreObject", + "S3PutObjectLegalHold", + "S3PutObjectRetention" ] }, "Policy":{"type":"string"}, @@ -1365,19 +1385,19 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The account ID for the Amazon Web Services account associated with the Amazon S3 batch operations job you want to replace tags on.

", + "documentation":"

The AWS account ID associated with the Amazon S3 Batch Operations job.

", "location":"header", "locationName":"x-amz-account-id" }, "JobId":{ "shape":"JobId", - "documentation":"

The ID for the job whose tags you want to replace.

", + "documentation":"

The ID for the Amazon S3 Batch Operations job whose tags you want to replace.

", "location":"uri", "locationName":"id" }, "Tags":{ "shape":"S3TagSet", - "documentation":"

The set of tags to associate with the job.

" + "documentation":"

The set of tags to associate with the Amazon S3 Batch Operations job.

" } } }, @@ -1454,7 +1474,8 @@ "S3BucketArnString":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":"arn:[^:]+:s3:.*" }, "S3CannedAccessControlList":{ "type":"string", @@ -1531,18 +1552,18 @@ }, "ObjectLockLegalHoldStatus":{ "shape":"S3ObjectLockLegalHoldStatus", - "documentation":"

" + "documentation":"

The Legal Hold status to be applied to all objects in the Batch Operations job.

" }, "ObjectLockMode":{ "shape":"S3ObjectLockMode", - "documentation":"

" + "documentation":"

The Retention mode to be applied to all objects in the Batch Operations job.

" }, "ObjectLockRetainUntilDate":{ "shape":"TimeStamp", - "documentation":"

" + "documentation":"

The date when the applied Object Retention configuration will expire on all objects in the Batch Operations job.

" } }, - "documentation":"

Contains the configuration parameters for a PUT Copy object operation. Amazon S3 batch operations passes each value through to the underlying PUT Copy object API. For more information about the parameters for this operation, see PUT Object - Copy.

" + "documentation":"

Contains the configuration parameters for a PUT Copy object operation. Amazon S3 Batch Operations passes each value through to the underlying PUT Copy object API. For more information about the parameters for this operation, see PUT Object - Copy.

" }, "S3ExpirationInDays":{ "type":"integer", @@ -1612,12 +1633,13 @@ "documentation":"

" } }, - "documentation":"

Contains the configuration parameters for an Initiate Glacier Restore job. Amazon S3 batch operations passes each value through to the underlying POST Object restore API. For more information about the parameters for this operation, see Restoring Archives.

" + "documentation":"

Contains the configuration parameters for an Initiate Glacier Restore job. Amazon S3 Batch Operations passes each value through to the underlying POST Object restore API. For more information about the parameters for this operation, see Restoring Archives.

" }, "S3KeyArnString":{ "type":"string", "max":2000, - "min":1 + "min":1, + "pattern":"arn:[^:]+:s3:.*" }, "S3MetadataDirective":{ "type":"string", @@ -1626,6 +1648,17 @@ "REPLACE" ] }, + "S3ObjectLockLegalHold":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"S3ObjectLockLegalHoldStatus", + "documentation":"

The Legal Hold status to be applied to all objects in the Batch Operations job.

" + } + }, + "documentation":"

" + }, "S3ObjectLockLegalHoldStatus":{ "type":"string", "enum":[ @@ -1640,6 +1673,13 @@ "GOVERNANCE" ] }, + "S3ObjectLockRetentionMode":{ + "type":"string", + "enum":[ + "COMPLIANCE", + "GOVERNANCE" + ] + }, "S3ObjectMetadata":{ "type":"structure", "members":{ @@ -1720,6 +1760,20 @@ "WRITE_ACP" ] }, + "S3Retention":{ + "type":"structure", + "members":{ + "RetainUntilDate":{ + "shape":"TimeStamp", + "documentation":"

The date when the applied Object Retention will expire on all objects in the Batch Operations job.

" + }, + "Mode":{ + "shape":"S3ObjectLockRetentionMode", + "documentation":"

The Retention mode to be applied to all objects in the Batch Operations job.

" + } + }, + "documentation":"

" + }, "S3SSEAlgorithm":{ "type":"string", "enum":[ @@ -1735,7 +1789,34 @@ "documentation":"

" } }, - "documentation":"

Contains the configuration parameters for a Set Object ACL operation. Amazon S3 batch operations passes each value through to the underlying PUT Object acl API. For more information about the parameters for this operation, see PUT Object acl.

" + "documentation":"

Contains the configuration parameters for a Set Object ACL operation. Amazon S3 Batch Operations passes each value through to the underlying PUT Object acl API. For more information about the parameters for this operation, see PUT Object acl.

" + }, + "S3SetObjectLegalHoldOperation":{ + "type":"structure", + "required":["LegalHold"], + "members":{ + "LegalHold":{ + "shape":"S3ObjectLockLegalHold", + "documentation":"

The Legal Hold contains the status to be applied to all objects in the Batch Operations job.

" + } + }, + "documentation":"

Contains the configuration parameters for a Set Object Legal Hold operation. Amazon S3 Batch Operations passes each value through to the underlying PUT Object Legal Hold API. For more information about the parameters for this operation, see PUT Object Legal Hold.

" + }, + "S3SetObjectRetentionOperation":{ + "type":"structure", + "required":["Retention"], + "members":{ + "BypassGovernanceRetention":{ + "shape":"Boolean", + "documentation":"

Indicates if the operation should be applied to objects in the Batch Operations job even if they have Governance-type Object Lock in place.

", + "box":true + }, + "Retention":{ + "shape":"S3Retention", + "documentation":"

Amazon S3 object lock Retention contains the retention mode to be applied to all objects in the Batch Operations job.

" + } + }, + "documentation":"

Contains the configuration parameters for a Set Object Retention operation. Amazon S3 Batch Operations passes each value through to the underlying PUT Object Retention API. For more information about the parameters for this operation, see PUT Object Retention.

" }, "S3SetObjectTaggingOperation":{ "type":"structure", @@ -1745,7 +1826,7 @@ "documentation":"

" } }, - "documentation":"

Contains the configuration parameters for a Set Object Tagging operation. Amazon S3 batch operations passes each value through to the underlying PUT Object tagging API. For more information about the parameters for this operation, see PUT Object tagging.

" + "documentation":"

Contains the configuration parameters for a Set Object Tagging operation. Amazon S3 Batch Operations passes each value through to the underlying PUT Object tagging API. For more information about the parameters for this operation, see PUT Object tagging.

" }, "S3StorageClass":{ "type":"string", @@ -1766,11 +1847,11 @@ ], "members":{ "Key":{ - "shape":"NonEmptyMaxLength1024String", + "shape":"TagKeyString", "documentation":"

" }, "Value":{ - "shape":"MaxLength1024String", + "shape":"TagValueString", "documentation":"

" } }, @@ -1787,12 +1868,29 @@ "max":8192 }, "Setting":{"type":"boolean"}, + "StringForNextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[A-Za-z0-9\\+\\:\\/\\=\\?\\#-_]+$" + }, "SuspendedCause":{ "type":"string", "max":1024, "min":1 }, "SuspendedDate":{"type":"timestamp"}, + "TagKeyString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + }, + "TagValueString":{ + "type":"string", + "max":1024, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:=+\\-@%]*)$" + }, "TimeStamp":{"type":"timestamp"}, "TooManyRequestsException":{ "type":"structure", @@ -1807,6 +1905,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, + "documentation":"

", "exception":true }, "UpdateJobPriorityRequest":{ @@ -1914,7 +2013,7 @@ "documentation":"

If this field is specified, this access point will only allow connections from the specified VPC ID.

" } }, - "documentation":"

The Virtual Private Cloud (VPC) configuration for an access point.

" + "documentation":"

The virtual private cloud (VPC) configuration for an access point.

" }, "VpcId":{ "type":"string", diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 291b121ea6d0..3d940b236d4d 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 3a007f8e1679..7388ca20754a 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -60,7 +60,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Amazon SageMaker Studio (Studio) upon access to the associated Studio Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. Apps will automatically terminate and be deleted when stopped from within Studio, or when the DeleteApp API is manually called. UserProfiles are limited to 5 concurrently running Apps at a time.

" + "documentation":"

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

" }, "CreateAutoMLJob":{ "name":"CreateAutoMLJob", @@ -112,7 +112,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a Domain for Amazon SageMaker Amazon SageMaker Studio (Studio), which can be accessed by end-users in a web browser. A Domain has an associated directory, list of authorized users, and a variety of security, application, policies, and Amazon Virtual Private Cloud configurations. An AWS account is limited to one Domain, per region. Users within a domain can share notebook files and other artifacts with each other. When a Domain is created, an Amazon Elastic File System (EFS) is also created for use by all of the users within the Domain. Each user receives a private home directory within the EFS for notebooks, Git repositories, and data files.

" + "documentation":"

Creates a Domain used by SageMaker Studio. A domain consists of an associated directory, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an Amazon Elastic File System (EFS) volume is also created for use by all of the users within the domain. Each user receives a private home directory within the EFS for notebooks, Git repositories, and data files.

All traffic between the domain and the EFS volume is communicated through the specified subnet IDs. All other traffic goes over the Internet through an Amazon SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port 2049.

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.

" }, "CreateEndpoint":{ "name":"CreateEndpoint", @@ -125,7 +125,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" + "documentation":"

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

" }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -138,7 +138,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

" + "documentation":"

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant, for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

" }, "CreateExperiment":{ "name":"CreateExperiment", @@ -283,7 +283,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Amazon SageMaker Studio (Studio), and granted access to all of the Apps and files associated with that Amazon Elastic File System (EFS). This operation can only be called when AuthMode equals IAM.

" + "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to Amazon SageMaker Studio, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

" }, "CreatePresignedNotebookInstanceUrl":{ "name":"CreatePresignedNotebookInstanceUrl", @@ -293,7 +293,7 @@ }, "input":{"shape":"CreatePresignedNotebookInstanceUrlInput"}, "output":{"shape":"CreatePresignedNotebookInstanceUrlOutput"}, - "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

" + "documentation":"

Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.

You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.

" }, "CreateProcessingJob":{ "name":"CreateProcessingJob", @@ -379,7 +379,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a new user profile. A user profile represents a single user within a Domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting and other user-oriented features. This entity is created during on-boarding. If an administrator invites a person by email or imports them from SSO, a new UserProfile is automatically created. This entity is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

" + "documentation":"

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

" }, "CreateWorkteam":{ "name":"CreateWorkteam", @@ -437,7 +437,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Used to delete a domain. If you on-boarded with IAM mode, you will need to delete your domain to on-board again using SSO. Use with caution. All of the members of the domain will lose access to their EFS volume, including data, notebooks, and other artifacts.

" + "documentation":"

Used to delete a domain. If you onboarded with IAM mode, you will need to delete your domain to onboard again using SSO. Use with caution. All of the members of the domain will lose access to their EFS volume, including data, notebooks, and other artifacts.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -455,7 +455,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteEndpointConfigInput"}, - "documentation":"

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

" + "documentation":"

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" }, "DeleteExperiment":{ "name":"DeleteExperiment", @@ -483,6 +483,19 @@ ], "documentation":"

Deletes the specified flow definition.

" }, + "DeleteHumanTaskUi":{ + "name":"DeleteHumanTaskUi", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHumanTaskUiRequest"}, + "output":{"shape":"DeleteHumanTaskUiResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Use this operation to delete a worker task template (HumanTaskUi).

To see a list of human task user interfaces (worker task templates) in your account, use ListHumanTaskUis. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

" + }, "DeleteModel":{ "name":"DeleteModel", "http":{ @@ -578,7 +591,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Deletes a user profile.

" + "documentation":"

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.

" }, "DeleteWorkteam":{ "name":"DeleteWorkteam", @@ -663,7 +676,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

The desciption of the domain.

" + "documentation":"

The description of the domain.

" }, "DescribeEndpoint":{ "name":"DescribeEndpoint", @@ -722,7 +735,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Returns information about the requested human task user interface.

" + "documentation":"

Returns information about the requested human task user interface (worker task template).

" }, "DescribeHyperParameterTuningJob":{ "name":"DescribeHyperParameterTuningJob", @@ -889,7 +902,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Describes the user profile.

" + "documentation":"

Describes a user profile. For more information, see CreateUserProfile.

" }, "DescribeWorkforce":{ "name":"DescribeWorkforce", @@ -1277,7 +1290,7 @@ }, "input":{"shape":"SearchRequest"}, "output":{"shape":"SearchResponse"}, - "documentation":"

Finds Amazon SageMaker resources that match a search query. Matching resource objects are returned as a list of SearchResult objects in the response. You can sort the search results by any resource property in a ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

" + "documentation":"

Finds Amazon SageMaker resources that match a search query. Matching resources are returned as a list of SearchRecord objects in the response. You can sort the search results by any resource property in a ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

" }, "StartMonitoringSchedule":{ "name":"StartMonitoringSchedule", @@ -1431,7 +1444,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Updates a domain. Changes will impact all of the people in the domain.

" + "documentation":"

Updates the default settings for new user profiles in the domain.

" }, "UpdateEndpoint":{ "name":"UpdateEndpoint", @@ -1444,7 +1457,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

" + "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When Amazon SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" }, "UpdateEndpointWeightsAndCapacities":{ "name":"UpdateEndpointWeightsAndCapacities", @@ -1564,7 +1577,7 @@ }, "input":{"shape":"UpdateWorkforceRequest"}, "output":{"shape":"UpdateWorkforceResponse"}, - "documentation":"

Restricts access to tasks assigned to workers in the specified workforce to those within specific ranges of IP addresses. You specify allowed IP addresses by creating a list of up to four CIDRs.

By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied access and get a Not Found error message on the worker portal. After restricting access with this operation, you can see the allowed IP values for a private workforce with the operation.

This operation applies only to private workforces.

" + "documentation":"

Restricts access to tasks assigned to workers in the specified workforce to those within specific ranges of IP addresses. You specify allowed IP addresses by creating a list of up to ten CIDRs.

By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied access and get a Not Found error message on the worker portal. After restricting access with this operation, you can see the allowed IP values for a private workforce with the operation.

This operation applies only to private workforces.

" }, "UpdateWorkteam":{ "name":"UpdateWorkteam", @@ -1804,10 +1817,10 @@ "members":{ "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

  • Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox

  • Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass

  • Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel

  • Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation

  • Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass

  • Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel

  • Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition

  • Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox

  • Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation

  • Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox

  • Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

    arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation

For more information, see Annotation Consolidation.

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and to process output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition

3D point cloud object detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection

3D point cloud object tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking

3D point cloud semantic segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels.

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox

3D point cloud object detection adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Use this task type when you want workers to adjust point-level semantic segmentation masks using a paint tool.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation

" } }, - "documentation":"

Configures how labels are consolidated across human workers.

" + "documentation":"

Configures how labels are consolidated across human workers and processes output data.

" }, "AppArn":{ "type":"string", @@ -3118,7 +3131,7 @@ }, "ResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The instance type and quantity.

" + "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } } }, @@ -3127,7 +3140,7 @@ "members":{ "AppArn":{ "shape":"AppArn", - "documentation":"

The app's Amazon Resource Name (ARN).

" + "documentation":"

The App's Amazon Resource Name (ARN).

" } } }, @@ -3273,7 +3286,7 @@ }, "AuthMode":{ "shape":"AuthMode", - "documentation":"

The mode of authentication that member use to access the domain.

" + "documentation":"

The mode of authentication that members use to access the domain.

" }, "DefaultUserSettings":{ "shape":"UserSettings", @@ -3281,19 +3294,19 @@ }, "SubnetIds":{ "shape":"Subnets", - "documentation":"

Security setting to limit to a set of subnets.

" + "documentation":"

The VPC subnets to use for communication with the EFS volume.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

Security setting to limit the domain's communication to a Amazon Virtual Private Cloud.

" + "documentation":"

The ID of the Amazon Virtual Private Cloud (VPC) to use for communication with the EFS volume.

" }, "Tags":{ "shape":"TagList", - "documentation":"

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

" + "documentation":"

Tags to associate with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

" }, "HomeEfsFileSystemKmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service encryption key ID.

" + "documentation":"

The AWS Key Management Service (KMS) encryption key ID. Encryption with a customer master key (CMK) is not supported.

" } } }, @@ -3421,7 +3434,10 @@ "shape":"FlowDefinitionName", "documentation":"

The name of your flow definition.

" }, - "HumanLoopRequestSource":{"shape":"HumanLoopRequestSource"}, + "HumanLoopRequestSource":{ + "shape":"HumanLoopRequestSource", + "documentation":"

Container for configuring the source of human task requests. Use to specify if Amazon Rekognition or Amazon Textract is used as an integration source.

" + }, "HumanLoopActivationConfig":{ "shape":"HumanLoopActivationConfig", "documentation":"

An object containing information about the events that trigger a human workflow.

" @@ -3503,7 +3519,7 @@ }, "TrainingJobDefinitions":{ "shape":"HyperParameterTrainingJobDefinitions", - "documentation":"

" + "documentation":"

A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job.

" }, "WarmStartConfig":{ "shape":"HyperParameterTuningJobWarmStartConfig", @@ -3558,7 +3574,7 @@ }, "LabelCategoryConfigS3Uri":{ "shape":"S3Uri", - "documentation":"

The S3 URL of the file that defines the categories used to label the data objects.

The file is a JSON structure in the following format:

{

\"document-version\": \"2018-11-28\"

\"labels\": [

{

\"label\": \"label 1\"

},

{

\"label\": \"label 2\"

},

...

{

\"label\": \"label n\"

}

]

}

" + "documentation":"

The S3 URL of the file that defines the categories used to label the data objects.

For 3D point cloud task types, see Create a Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.

For all other built-in task types and custom tasks, your label category configuration file must be a JSON file in the following format. Identify the labels you want to use by replacing label_1, label_2,...,label_n with your label categories.

{

\"document-version\": \"2018-11-28\"

\"labels\": [

{

\"label\": \"label_1\"

},

{

\"label\": \"label_2\"

},

...

{

\"label\": \"label_n\"

}

]

}

" }, "StoppingConditions":{ "shape":"LabelingJobStoppingConditions", @@ -4031,6 +4047,10 @@ "shape":"MaxConcurrentTransforms", "documentation":"

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

" }, + "ModelClientConfig":{ + "shape":"ModelClientConfig", + "documentation":"

Configures the timeout and maximum number of retries for processing a transform job invocation.

" + }, "MaxPayloadInMB":{ "shape":"MaxPayloadInMB", "documentation":"

The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than, or equal to, the size of a single record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The default value is 6 MB.

For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support HTTP chunked encoding.

" @@ -4521,7 +4541,7 @@ }, "RetentionPolicy":{ "shape":"RetentionPolicy", - "documentation":"

The retention policy for this domain, which specifies which resources will be retained after the Domain is deleted. By default, all resources are retained (not automatically deleted).

" + "documentation":"

The retention policy for this domain, which specifies whether resources will be retained after the Domain is deleted. By default, all resources are retained (not automatically deleted).

" } } }, @@ -4579,6 +4599,21 @@ "members":{ } }, + "DeleteHumanTaskUiRequest":{ + "type":"structure", + "required":["HumanTaskUiName"], + "members":{ + "HumanTaskUiName":{ + "shape":"HumanTaskUiName", + "documentation":"

The name of the human task user interface (worker task template) you want to delete.

" + } + } + }, + "DeleteHumanTaskUiResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteModelInput":{ "type":"structure", "required":["ModelName"], @@ -4887,7 +4922,7 @@ }, "ResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The instance type and quantity.

" + "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } } }, @@ -5372,7 +5407,10 @@ "shape":"Timestamp", "documentation":"

The timestamp when the flow definition was created.

" }, - "HumanLoopRequestSource":{"shape":"HumanLoopRequestSource"}, + "HumanLoopRequestSource":{ + "shape":"HumanLoopRequestSource", + "documentation":"

Container for configuring the source of human task requests. Used to specify if Amazon Rekognition or Amazon Textract is used as an integration source.

" + }, "HumanLoopActivationConfig":{ "shape":"HumanLoopActivationConfig", "documentation":"

An object containing information about what triggers a human review workflow.

" @@ -5391,7 +5429,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

" + "documentation":"

The reason your flow definition failed.

" } } }, @@ -5401,7 +5439,7 @@ "members":{ "HumanTaskUiName":{ "shape":"HumanTaskUiName", - "documentation":"

The name of the human task user interface you want information about.

" + "documentation":"

The name of the human task user interface (worker task template) you want information about.

" } } }, @@ -5416,11 +5454,15 @@ "members":{ "HumanTaskUiArn":{ "shape":"HumanTaskUiArn", - "documentation":"

The Amazon Resource Name (ARN) of the human task user interface.

" + "documentation":"

The Amazon Resource Name (ARN) of the human task user interface (worker task template).

" }, "HumanTaskUiName":{ "shape":"HumanTaskUiName", - "documentation":"

The name of the human task user interface.

" + "documentation":"

The name of the human task user interface (worker task template).

" + }, + "HumanTaskUiStatus":{ + "shape":"HumanTaskUiStatus", + "documentation":"

The status of the human task user interface (worker task template). Valid values are listed below.

" }, "CreationTime":{ "shape":"Timestamp", @@ -5435,7 +5477,7 @@ "members":{ "HyperParameterTuningJobName":{ "shape":"HyperParameterTuningJobName", - "documentation":"

The name of the tuning job to describe.

" + "documentation":"

The name of the tuning job.

" } } }, @@ -5469,7 +5511,7 @@ }, "TrainingJobDefinitions":{ "shape":"HyperParameterTrainingJobDefinitions", - "documentation":"

" + "documentation":"

A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job.

" }, "HyperParameterTuningJobStatus":{ "shape":"HyperParameterTuningJobStatus", @@ -6090,7 +6132,7 @@ }, "AutoMLJobArn":{ "shape":"AutoMLJobArn", - "documentation":"

" + "documentation":"

The Amazon Resource Name (ARN) of an AutoML job.

" }, "ModelArtifacts":{ "shape":"ModelArtifacts", @@ -6244,6 +6286,10 @@ "shape":"MaxConcurrentTransforms", "documentation":"

The maximum number of parallel requests on each instance node that can be launched in a transform job. The default value is 1.

" }, + "ModelClientConfig":{ + "shape":"ModelClientConfig", + "documentation":"

The timeout and maximum number of retries for processing a transform job invocation.

" + }, "MaxPayloadInMB":{ "shape":"MaxPayloadInMB", "documentation":"

The maximum payload size, in MB, used in the transform job.

" @@ -6286,7 +6332,7 @@ }, "AutoMLJobArn":{ "shape":"AutoMLJobArn", - "documentation":"

" + "documentation":"

The Amazon Resource Name (ARN) of the AutoML transform job.

" }, "DataProcessing":{"shape":"DataProcessing"}, "ExperimentConfig":{"shape":"ExperimentConfig"} @@ -6440,7 +6486,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The domain ID.

" + "documentation":"

The ID of the domain that contains the profile.

" }, "UserProfileArn":{ "shape":"UserProfileArn", @@ -6452,7 +6498,7 @@ }, "HomeEfsFileSystemUid":{ "shape":"EfsUid", - "documentation":"

The homa Amazon Elastic File System (EFS) Uid.

" + "documentation":"

The ID of the user's profile in the Amazon Elastic File System (EFS) volume.

" }, "Status":{ "shape":"UserProfileStatus", @@ -6851,11 +6897,6 @@ "min":1, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" }, - "EnvironmentArn":{ - "type":"string", - "max":256, - "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:environment/[a-z0-9](-*[a-z0-9]){0,62}$" - }, "EnvironmentKey":{ "type":"string", "max":1024, @@ -6924,7 +6965,7 @@ "documentation":"

The list of tags that are associated with the experiment. You can use Search API to search on the tags.

" } }, - "documentation":"

A summary of the properties of an experiment as returned by the Search API.

" + "documentation":"

The properties of an experiment as returned by the Search API.

" }, "ExperimentArn":{ "type":"string", @@ -7068,18 +7109,18 @@ "members":{ "Name":{ "shape":"ResourcePropertyName", - "documentation":"

A property name. For example, TrainingJobName. For the list of valid property names returned in a search result for each supported resource, see TrainingJob properties. You must specify a valid property name for the resource.

" + "documentation":"

A resource property name. For example, TrainingJobName. For valid property names, see SearchRecord. You must specify a valid property for the resource.

" }, "Operator":{ "shape":"Operator", - "documentation":"

A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

Equals

The specified resource in Name equals the specified Value.

NotEquals

The specified resource in Name does not equal the specified Value.

GreaterThan

The specified resource in Name is greater than the specified Value. Not supported for text-based properties.

GreaterThanOrEqualTo

The specified resource in Name is greater than or equal to the specified Value. Not supported for text-based properties.

LessThan

The specified resource in Name is less than the specified Value. Not supported for text-based properties.

LessThanOrEqualTo

The specified resource in Name is less than or equal to the specified Value. Not supported for text-based properties.

Contains

Only supported for text-based properties. The word-list of the property contains the specified Value. A SearchExpression can include only one Contains operator.

If you have specified a filter Value, the default is Equals.

" + "documentation":"

A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

Equals

The value of Name equals Value.

NotEquals

The value of Name doesn't equal Value.

Exists

The Name property exists.

NotExists

The Name property does not exist.

GreaterThan

The value of Name is greater than Value. Not supported for text properties.

GreaterThanOrEqualTo

The value of Name is greater than or equal to Value. Not supported for text properties.

LessThan

The value of Name is less than Value. Not supported for text properties.

LessThanOrEqualTo

The value of Name is less than or equal to Value. Not supported for text properties.

In

The value of Name is one of the comma delimited strings in Value. Only supported for text properties.

Contains

The value of Name contains the string Value. Only supported for text properties.

A SearchExpression can include the Contains operator multiple times when the value of Name is one of the following:

  • Experiment.DisplayName

  • Experiment.ExperimentName

  • Experiment.Tags

  • Trial.DisplayName

  • Trial.TrialName

  • Trial.Tags

  • TrialComponent.DisplayName

  • TrialComponent.TrialComponentName

  • TrialComponent.Tags

  • TrialComponent.InputArtifacts

  • TrialComponent.OutputArtifacts

A SearchExpression can include only one Contains operator for all other values of Name. In these cases, if you include multiple Contains operators in the SearchExpression, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.\"

" }, "Value":{ "shape":"FilterValue", - "documentation":"

A value used with Resource and Operator to determine if objects satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS.

" + "documentation":"

A value used with Name and Operator to determine which resources satisfy the filter's condition. For numerical properties, Value must be an integer or floating-point decimal. For timestamp properties, Value must be an ISO 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS.

" } }, - "documentation":"

A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value.

If you don't specify an Operator and a Value, the filter searches for only the specified property. For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.

If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator as the default.

In search, there are several property types:

Metrics

To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

{

\"Name\": \"Metrics.accuracy\",

\"Operator\": \"GREATER_THAN\",

\"Value\": \"0.9\"

}

HyperParameters

To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

{

\"Name\": \"HyperParameters.learning_rate\",

\"Operator\": \"LESS_THAN\",

\"Value\": \"0.5\"

}

Tags

To define a tag filter, enter a value with the form \"Tags.<key>\".

" + "documentation":"

A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value. Resources that match the statement are returned in the results from the Search API.

If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator.

In search, there are several property types:

Metrics

To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

{

\"Name\": \"Metrics.accuracy\",

\"Operator\": \"GreaterThan\",

\"Value\": \"0.9\"

}

HyperParameters

To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

{

\"Name\": \"HyperParameters.learning_rate\",

\"Operator\": \"LessThan\",

\"Value\": \"0.5\"

}

Tags

To define a tag filter, enter a value with the form Tags.<key>.

" }, "FilterList":{ "type":"list", @@ -7262,7 +7303,8 @@ "MXNET", "ONNX", "PYTORCH", - "XGBOOST" + "XGBOOST", + "TFLITE" ] }, "GenerateCandidateDefinitionsOnly":{"type":"boolean"}, @@ -7272,7 +7314,7 @@ "members":{ "Resource":{ "shape":"ResourceType", - "documentation":"

The name of the Amazon SageMaker resource to Search for.

" + "documentation":"

The name of the Amazon SageMaker resource to search for.

" }, "SuggestionQuery":{ "shape":"SuggestionQuery", @@ -7436,7 +7478,7 @@ }, "PreHumanTaskLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For the built-in bounding box, image classification, semantic segmentation, and text classification task types, Amazon SageMaker Ground Truth provides the following Lambda functions:

US East (Northern Virginia) (us-east-1):

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation

US East (Ohio) (us-east-2):

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation

US West (Oregon) (us-west-2):

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation

Canada (Central) (ca-central-1):

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation

EU (Ireland) (eu-west-1):

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation

EU (London) (eu-west-2):

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation

EU Frankfurt (eu-central-1):

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation

Asia Pacific (Tokyo) (ap-northeast-1):

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation

Asia Pacific (Seoul) (ap-northeast-2):

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation

Asia Pacific (Mumbai) (ap-south-1):

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation

Asia Pacific (Singapore) (ap-southeast-1):

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation

Asia Pacific (Sydney) (ap-southeast-2):

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition

3D Point Cloud Modalities

Use the following pre-annotation lambdas for 3D point cloud labeling modality tasks. See 3D Point Cloud Task types to learn more.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation

3D point cloud object detection adjustment - Adjust 3D cuboids in a point cloud frame.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Adjust 3D cuboids across a sequence of point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Adjust semantic segmentation masks in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation

" }, "TaskKeywords":{ "shape":"TaskKeywords", @@ -7488,6 +7530,13 @@ "min":1, "pattern":"^[a-z0-9](-*[a-z0-9])*" }, + "HumanTaskUiStatus":{ + "type":"string", + "enum":[ + "Active", + "Deleting" + ] + }, "HumanTaskUiSummaries":{ "type":"list", "member":{"shape":"HumanTaskUiSummary"} @@ -8099,6 +8148,16 @@ "max":20, "min":0 }, + "InvocationsMaxRetries":{ + "type":"integer", + "max":3, + "min":0 + }, + "InvocationsTimeoutInSeconds":{ + "type":"integer", + "max":3600, + "min":1 + }, "JobReferenceCode":{ "type":"string", "min":1, @@ -8139,7 +8198,7 @@ "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The instance type and quantity.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } }, "documentation":"

Jupyter server's app settings.

" @@ -8149,7 +8208,7 @@ "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The instance type and quantity.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } }, "documentation":"

The kernel gateway app settings.

" @@ -10207,7 +10266,21 @@ "documentation":"

The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

" } }, - "documentation":"

Provides information about the location that is configured for storing model artifacts.

" + "documentation":"

Provides information about the location that is configured for storing model artifacts.

Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.

" + }, + "ModelClientConfig":{ + "type":"structure", + "members":{ + "InvocationsTimeoutInSeconds":{ + "shape":"InvocationsTimeoutInSeconds", + "documentation":"

The timeout value in seconds for an invocation request.

" + }, + "InvocationsMaxRetries":{ + "shape":"InvocationsMaxRetries", + "documentation":"

The maximum number of retries when invocation requests are failing.

" + } + }, + "documentation":"

Configures the timeout and maximum number of retries for processing a transform job invocation.

" }, "ModelName":{ "type":"string", @@ -10833,7 +10906,7 @@ "documentation":"

A list of filters. Each filter acts on a property. Filters must contain at least one Filters value. For example, a NestedFilters call might include a filter on the PropertyName parameter of the InputDataConfig property: InputDataConfig.DataSource.S3DataSource.S3Uri.

" } }, - "documentation":"

Defines a list of NestedFilters objects. To satisfy the conditions specified in the NestedFilters call, a resource must satisfy the conditions of all of the filters.

For example, you could define a NestedFilters using the training job's InputDataConfig property to filter on Channel objects.

A NestedFilters object contains multiple filters. For example, to find all training jobs whose name contains train and that have cat/data in their S3Uri (specified in InputDataConfig), you need to create a NestedFilters object that specifies the InputDataConfig property with the following Filter objects:

  • '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"EQUALS\", \"Value\":\"train\"}',

  • '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"CONTAINS\", \"Value\":\"cat/data\"}'

" + "documentation":"

A list of nested Filter objects. A resource must satisfy the conditions of all filters to be included in the results returned from the Search API.

For example, to filter on a training job's InputDataConfig property with a specific channel name and S3Uri prefix, define the following filters:

  • '{Name:\"InputDataConfig.ChannelName\", \"Operator\":\"Equals\", \"Value\":\"train\"}',

  • '{Name:\"InputDataConfig.DataSource.S3DataSource.S3Uri\", \"Operator\":\"Contains\", \"Value\":\"mybucket/catdata\"}'

" }, "NestedFiltersList":{ "type":"list", @@ -10844,6 +10917,10 @@ "NetworkConfig":{ "type":"structure", "members":{ + "EnableInterContainerTrafficEncryption":{ + "shape":"Boolean", + "documentation":"

Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

" + }, "EnableNetworkIsolation":{ "shape":"Boolean", "documentation":"

Whether to allow inbound and outbound network calls to and from the containers used for the processing job.

" @@ -11400,6 +11477,82 @@ "ml.r5.24xlarge" ] }, + "ProcessingJob":{ + "type":"structure", + "members":{ + "ProcessingInputs":{ + "shape":"ProcessingInputs", + "documentation":"

For each input, data is downloaded from S3 into the processing container before the processing job begins running if \"S3InputMode\" is set to File.

" + }, + "ProcessingOutputConfig":{"shape":"ProcessingOutputConfig"}, + "ProcessingJobName":{ + "shape":"ProcessingJobName", + "documentation":"

The name of the processing job.

" + }, + "ProcessingResources":{"shape":"ProcessingResources"}, + "StoppingCondition":{"shape":"ProcessingStoppingCondition"}, + "AppSpecification":{"shape":"AppSpecification"}, + "Environment":{ + "shape":"ProcessingEnvironmentMap", + "documentation":"

Sets the environment variables in the Docker container.

" + }, + "NetworkConfig":{"shape":"NetworkConfig"}, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the role used to create the processing job.

" + }, + "ExperimentConfig":{"shape":"ExperimentConfig"}, + "ProcessingJobArn":{ + "shape":"ProcessingJobArn", + "documentation":"

The ARN of the processing job.

" + }, + "ProcessingJobStatus":{ + "shape":"ProcessingJobStatus", + "documentation":"

The status of the processing job.

" + }, + "ExitMessage":{ + "shape":"ExitMessage", + "documentation":"

A string, up to one KB in size, that contains metadata from the processing container when the processing job exits.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

A string, up to one KB in size, that contains the reason a processing job failed, if it failed.

" + }, + "ProcessingEndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the processing job ended.

" + }, + "ProcessingStartTime":{ + "shape":"Timestamp", + "documentation":"

The time that the processing job started.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time the processing job was last modified.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time the processing job was created.

" + }, + "MonitoringScheduleArn":{ + "shape":"MonitoringScheduleArn", + "documentation":"

The ARN of a monitoring schedule for an endpoint associated with this processing job.

" + }, + "AutoMLJobArn":{ + "shape":"AutoMLJobArn", + "documentation":"

The Amazon Resource Name (ARN) of the AutoML job associated with this processing job.

" + }, + "TrainingJobArn":{ + "shape":"TrainingJobArn", + "documentation":"

The ARN of the training job associated with this processing job.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + } + }, + "documentation":"

An Amazon SageMaker processing job that is used to analyze data and evaluate models. For more information, see Process Data and Evaluate Models.

" + }, "ProcessingJobArn":{ "type":"string", "max":256, @@ -11573,7 +11726,7 @@ }, "S3InputMode":{ "shape":"ProcessingS3InputMode", - "documentation":"

Wether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.

" + "documentation":"

Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.

" }, "S3DataDistributionType":{ "shape":"ProcessingS3DataDistributionType", @@ -11581,7 +11734,7 @@ }, "S3CompressionType":{ "shape":"ProcessingS3CompressionType", - "documentation":"

Whether to use Gzip compresion for Amazon S3 storage.

" + "documentation":"

Whether to use Gzip compression for Amazon S3 storage.

" } }, "documentation":"

Information about where and how you want to obtain the inputs for a processing job.

" @@ -11863,7 +12016,6 @@ "RenderUiTemplateRequest":{ "type":"structure", "required":[ - "UiTemplate", "Task", "RoleArn" ], @@ -11879,6 +12031,10 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

The Amazon Resource Name (ARN) that has access to the S3 objects that are used by the template.

" + }, + "HumanTaskUiArn":{ + "shape":"HumanTaskUiArn", + "documentation":"

The HumanTaskUiArn of the worker UI that you want to render. Do not provide a HumanTaskUiArn if you use the UiTemplate parameter.

See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig.

" } } }, @@ -12031,16 +12187,16 @@ "ResourceSpec":{ "type":"structure", "members":{ - "EnvironmentArn":{ - "shape":"EnvironmentArn", - "documentation":"

The Amazon Resource Name (ARN) of the environment.

" + "SageMakerImageArn":{ + "shape":"SageMakerImageArn", + "documentation":"

The Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" }, "InstanceType":{ "shape":"AppInstanceType", "documentation":"

The instance type.

" } }, - "documentation":"

The instance type and quantity.

" + "documentation":"

The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The ARN is stored as metadata in SageMaker Studio notebooks.

" }, "ResourceType":{ "type":"string", @@ -12065,10 +12221,10 @@ "members":{ "HomeEfsFileSystem":{ "shape":"RetentionType", - "documentation":"

The home Amazon Elastic File System (EFS).

" + "documentation":"

The default is Retain, which specifies to keep the data stored on the EFS volume.

Specify Delete to delete the data stored on the EFS volume.

" } }, - "documentation":"

The retention policy.

" + "documentation":"

The retention policy for data stored on an Amazon Elastic File System (EFS) volume.

" }, "RetentionType":{ "type":"string", @@ -12134,7 +12290,7 @@ }, "S3Uri":{ "shape":"S3Uri", - "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

  • A key name prefix might look like this: s3://bucketname/exampleprefix.

  • A manifest might look like this: s3://bucketname/example.manifest

    The manifest is an S3 object which is a JSON file with the following format:

    The preceding JSON matches the following s3Uris:

    [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

    \"relative/path/to/custdata-1\",

    \"relative/path/custdata-2\",

    ...

    \"relative/path/custdata-N\"

    ]

    The preceding JSON matches the following s3Uris:

    s3://customer_bucket/some/prefix/relative/path/to/custdata-1

    s3://customer_bucket/some/prefix/relative/path/custdata-2

    ...

    s3://customer_bucket/some/prefix/relative/path/custdata-N

    The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

" + "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

  • A key name prefix might look like this: s3://bucketname/exampleprefix

  • A manifest might look like this: s3://bucketname/example.manifest

    A manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri that precludes users from specifying a manifest whose individual S3Uri is sourced from different S3 buckets.

    The following code example shows a valid manifest format:

    [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

    \"relative/path/to/custdata-1\",

    \"relative/path/custdata-2\",

    ...

    \"relative/path/custdata-N\"

    ]

    This JSON is equivalent to the following S3Uri list:

    s3://customer_bucket/some/prefix/relative/path/to/custdata-1

    s3://customer_bucket/some/prefix/relative/path/custdata-2

    ...

    s3://customer_bucket/some/prefix/relative/path/custdata-N

    The complete set of S3Uri in this manifest is the input data for the channel for this data source. The object that each S3Uri points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

" }, "S3DataDistributionType":{ "shape":"S3DataDistribution", @@ -12160,6 +12316,11 @@ "max":1024, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, + "SageMakerImageArn":{ + "type":"string", + "max":256, + "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-z0-9]([-.]?[a-z0-9])*$" + }, "SamplingPercentage":{ "type":"integer", "max":100, @@ -12210,7 +12371,7 @@ "documentation":"

A Boolean operator used to evaluate the search expression. If you want every conditional statement in all lists to be satisfied for the entire search expression to be true, specify And. If only a single conditional statement needs to be true for the entire search expression to be true, specify Or. The default value is And.

" } }, - "documentation":"

A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression can contain up to twenty elements.

A SearchExpression contains the following components:

  • A list of Filter objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value. A SearchExpression can include only one Contains operator.

  • A list of NestedFilter objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.

  • A list of SearchExpression objects. A search expression object can be nested in a list of search expression objects.

  • A Boolean operator: And or Or.

" + "documentation":"

A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression can contain up to twenty elements.

A SearchExpression contains the following components:

  • A list of Filter objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value.

  • A list of NestedFilter objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.

  • A list of SearchExpression objects. A search expression object can be nested in a list of search expression objects.

  • A Boolean operator: And or Or.

" }, "SearchExpressionList":{ "type":"list", @@ -12223,22 +12384,22 @@ "members":{ "TrainingJob":{ "shape":"TrainingJob", - "documentation":"

A TrainingJob object that is returned as part of a Search request.

" + "documentation":"

The properties of a training job.

" }, "Experiment":{ "shape":"Experiment", - "documentation":"

A summary of the properties of an experiment.

" + "documentation":"

The properties of an experiment.

" }, "Trial":{ "shape":"Trial", - "documentation":"

A summary of the properties of a trial.

" + "documentation":"

The properties of a trial.

" }, "TrialComponent":{ "shape":"TrialComponent", - "documentation":"

A summary of the properties of a trial component.

" + "documentation":"

The properties of a trial component.

" } }, - "documentation":"

An individual search result record that contains a single resource object.

" + "documentation":"

A single resource returned as part of the Search API response.

" }, "SearchRequest":{ "type":"structure", @@ -12250,7 +12411,7 @@ }, "SearchExpression":{ "shape":"SearchExpression", - "documentation":"

A Boolean conditional statement. Resource objects must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions, NestedFilters, and Filters that can be included in a SearchExpression object is 50.

" + "documentation":"

A Boolean conditional statement. Resources must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions, NestedFilters, and Filters that can be included in a SearchExpression object is 50.

" }, "SortBy":{ "shape":"ResourcePropertyName", @@ -12262,11 +12423,11 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

If more than MaxResults resource objects match the specified SearchExpression, the SearchResponse includes a NextToken. The NextToken can be passed to the next SearchRequest to continue retrieving results for the specified SearchExpression and Sort parameters.

" + "documentation":"

If more than MaxResults resources match the specified SearchExpression, the response includes a NextToken. The NextToken can be passed to the next SearchRequest to continue retrieving results.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of results to return in a SearchResponse.

", + "documentation":"

The maximum number of results to return.

", "box":true } } @@ -12276,7 +12437,7 @@ "members":{ "Results":{ "shape":"SearchResultsList", - "documentation":"

A list of SearchResult objects.

" + "documentation":"

A list of SearchRecord objects.

" }, "NextToken":{ "shape":"NextToken", @@ -12371,18 +12532,18 @@ "members":{ "NotebookOutputOption":{ "shape":"NotebookOutputOption", - "documentation":"

The notebook output option.

" + "documentation":"

Whether to include the notebook cell output when sharing the notebook. The default is Disabled.

" }, "S3OutputPath":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 output path.

" + "documentation":"

When NotebookOutputOption is Allowed, the Amazon S3 bucket used to save the notebook cell output. If S3OutputPath isn't specified, a default bucket is used.

" }, "S3KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The AWS Key Management Service encryption key ID.

" + "documentation":"

When NotebookOutputOption is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket.

" } }, - "documentation":"

The sharing settings.

" + "documentation":"

Specifies options when sharing an Amazon SageMaker Studio notebook. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called.

" }, "ShuffleConfig":{ "type":"structure", @@ -12473,7 +12634,7 @@ "members":{ "Cidrs":{ "shape":"Cidrs", - "documentation":"

A list of one to four Classless Inter-Domain Routing (CIDR) values.

Maximum: Four CIDR values

The following Length Constraints apply to individual CIDR values in the CIDR value list.

" + "documentation":"

A list of one to ten Classless Inter-Domain Routing (CIDR) values.

Maximum: Ten CIDR values

The following Length Constraints apply to individual CIDR values in the CIDR value list.

" } }, "documentation":"

A list of IP address ranges (CIDRs). Used to create an allow list of IP addresses for a private workforce. For more information, see .

" @@ -12674,7 +12835,7 @@ }, "ListingId":{ "shape":"String", - "documentation":"

" + "documentation":"

Marketplace product listing ID.

" } }, "documentation":"

Describes a work team of a vendor that does the labelling job.

" @@ -12804,7 +12965,7 @@ }, "TaskTimeLimitInSeconds":{ "type":"integer", - "max":28800, + "max":604800, "min":30 }, "TaskTitle":{ @@ -12834,7 +12995,7 @@ "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The instance type and quantity.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } }, "documentation":"

The TensorBoard app settings.

" @@ -12906,7 +13067,12 @@ "ml.c5.2xlarge", "ml.c5.4xlarge", "ml.c5.9xlarge", - "ml.c5.18xlarge" + "ml.c5.18xlarge", + "ml.c5n.xlarge", + "ml.c5n.2xlarge", + "ml.c5n.4xlarge", + "ml.c5n.9xlarge", + "ml.c5n.18xlarge" ] }, "TrainingInstanceTypes":{ @@ -13534,7 +13700,7 @@ "documentation":"

A list of the components associated with the trial. For each component, a summary of the component's properties is included.

" } }, - "documentation":"

A summary of the properties of a trial as returned by the Search API.

" + "documentation":"

The properties of a trial as returned by the Search API.

" }, "TrialArn":{ "type":"string", @@ -13556,7 +13722,10 @@ "shape":"TrialComponentArn", "documentation":"

The Amazon Resource Name (ARN) of the trial component.

" }, - "Source":{"shape":"TrialComponentSource"}, + "Source":{ + "shape":"TrialComponentSource", + "documentation":"

The Amazon Resource Name (ARN) and job type of the source of the component.

" + }, "Status":{"shape":"TrialComponentStatus"}, "StartTime":{ "shape":"Timestamp", @@ -13594,7 +13763,7 @@ }, "SourceDetail":{ "shape":"TrialComponentSourceDetail", - "documentation":"

The source of the trial component.>

" + "documentation":"

Details of the source of the component.

" }, "Tags":{ "shape":"TagList", @@ -13605,7 +13774,7 @@ "documentation":"

An array of the parents of the component. A parent is a trial the component is associated with and the experiment the trial is part of. A component might not have any parents.

" } }, - "documentation":"

A summary of the properties of a trial component as returned by the Search API.

" + "documentation":"

The properties of a trial component as returned by the Search API.

" }, "TrialComponentArn":{ "type":"string", @@ -13719,7 +13888,9 @@ "enum":[ "InProgress", "Completed", - "Failed" + "Failed", + "Stopping", + "Stopped" ] }, "TrialComponentSimpleSummaries":{ @@ -13752,14 +13923,14 @@ "members":{ "SourceArn":{ "shape":"TrialComponentSourceArn", - "documentation":"

The Amazon Resource Name (ARN) of the source.

" + "documentation":"

The source ARN.

" }, "SourceType":{ "shape":"SourceType", "documentation":"

The source job type.

" } }, - "documentation":"

The source of the trial component.

" + "documentation":"

The Amazon Resource Name (ARN) and job type of the source of a trial component.

" }, "TrialComponentSourceArn":{ "type":"string", @@ -13773,9 +13944,16 @@ "shape":"TrialComponentSourceArn", "documentation":"

The Amazon Resource Name (ARN) of the source.

" }, - "TrainingJob":{"shape":"TrainingJob"} + "TrainingJob":{ + "shape":"TrainingJob", + "documentation":"

Information about a training job that's the source of a trial component.

" + }, + "ProcessingJob":{ + "shape":"ProcessingJob", + "documentation":"

Information about a processing job that's the source of a trial component.

" + } }, - "documentation":"

Detailed information about the source of a trial component.

" + "documentation":"

Detailed information about the source of a trial component. Either ProcessingJob or TrainingJob is returned.

" }, "TrialComponentStatus":{ "type":"structure", @@ -13929,11 +14107,14 @@ }, "UiConfig":{ "type":"structure", - "required":["UiTemplateS3Uri"], "members":{ "UiTemplateS3Uri":{ "shape":"S3Uri", - "documentation":"

The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" + "documentation":"

The Amazon S3 bucket location of the UI template, or worker task template. This is the template used to render the worker UI and tools for labeling job tasks. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" + }, + "HumanTaskUiArn":{ + "shape":"HumanTaskUiArn", + "documentation":"

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud labeling modalities. Use your labeling job task type to select one of the following ARN's and use it with this parameter when you create a labeling job. Replace aws-region with the AWS region you are creating your labeling job in.

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation

" } }, "documentation":"

Provided configuration information for the worker UI for a labeling job.

" @@ -13993,7 +14174,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The domain ID.

" + "documentation":"

The ID of the domain to be updated.

" }, "DefaultUserSettings":{ "shape":"UserSettings", @@ -14006,7 +14187,7 @@ "members":{ "DomainArn":{ "shape":"DomainArn", - "documentation":"

The domain Amazon Resource Name (ARN).

" + "documentation":"

The Amazon Resource Name (ARN) of the domain.

" } } }, @@ -14334,7 +14515,7 @@ }, "SourceIpConfig":{ "shape":"SourceIpConfig", - "documentation":"

A list of one to four worker IP address ranges (CIDRs) that can be used to access tasks assigned to this workforce.

Maximum: Four CIDR values

" + "documentation":"

A list of one to ten worker IP address ranges (CIDRs) that can be used to access tasks assigned to this workforce.

Maximum: Ten CIDR values

" } } }, @@ -14577,7 +14758,7 @@ }, "SourceIpConfig":{ "shape":"SourceIpConfig", - "documentation":"

A list of one to four IP address ranges (CIDRs) to be added to the workforce allow list.

" + "documentation":"

A list of one to ten IP address ranges (CIDRs) to be added to the workforce allow list.

" } }, "documentation":"

A single private workforce, which is automatically created when you create your first private work team. You can create one private work force in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

" @@ -14657,5 +14838,5 @@ "member":{"shape":"Workteam"} } }, - "documentation":"

Provides APIs for creating and managing Amazon SageMaker resources.

" + "documentation":"

Provides APIs for creating and managing Amazon SageMaker resources.

Other Resources:

" } diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index e95273abdc53..225d6b361f7b 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json index 701d101c785e..bc9ad98eb866 100644 --- a/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemakera2iruntime/src/main/resources/codegen-resources/service-2.json @@ -54,6 +54,7 @@ "output":{"shape":"ListHumanLoopsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], @@ -121,7 +122,7 @@ "members":{ "HumanLoopName":{ "shape":"HumanLoopName", - "documentation":"

The name of the human loop you want to delete.

", + "documentation":"

The name of the human loop that you want to delete.

", "location":"uri", "locationName":"HumanLoopName" } @@ -138,7 +139,7 @@ "members":{ "HumanLoopName":{ "shape":"HumanLoopName", - "documentation":"

The unique name of the human loop.

", + "documentation":"

The name of the human loop that you want information about.

", "location":"uri", "locationName":"HumanLoopName" } @@ -160,19 +161,19 @@ }, "FailureReason":{ "shape":"String", - "documentation":"

The reason why a human loop has failed. The failure reason is returned when the human loop status is Failed.

" + "documentation":"

The reason why a human loop failed. The failure reason is returned when the status of the human loop is Failed.

" }, "FailureCode":{ "shape":"String", - "documentation":"

A failure code denoting a specific type of failure.

" + "documentation":"

A failure code that identifies the type of failure.

" }, "HumanLoopStatus":{ "shape":"HumanLoopStatus", - "documentation":"

The status of the human loop. Valid values:

" + "documentation":"

The status of the human loop.

" }, "HumanLoopName":{ "shape":"HumanLoopName", - "documentation":"

The name of the human loop.

" + "documentation":"

The name of the human loop. The name must be lowercase, unique within the Region in your account, and can have up to 63 characters. Valid characters: a-z, 0-9, and - (hyphen).

" }, "HumanLoopArn":{ "shape":"HumanLoopArn", @@ -184,7 +185,7 @@ }, "HumanLoopOutput":{ "shape":"HumanLoopOutput", - "documentation":"

An object containing information about the output of the human loop.

" + "documentation":"

An object that contains information about the output of the human loop.

" } } }, @@ -264,7 +265,7 @@ }, "HumanLoopStatus":{ "shape":"HumanLoopStatus", - "documentation":"

The status of the human loop. Valid values:

" + "documentation":"

The status of the human loop.

" }, "CreationTime":{ "shape":"Timestamp", @@ -272,25 +273,25 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

The reason why the human loop failed. A failure reason is returned only when the status of the human loop is Failed.

" + "documentation":"

The reason why the human loop failed. A failure reason is returned when the status of the human loop is Failed.

" }, "FlowDefinitionArn":{ "shape":"FlowDefinitionArn", - "documentation":"

The Amazon Resource Name (ARN) of the flow definition.

" + "documentation":"

The Amazon Resource Name (ARN) of the flow definition used to configure the human loop.

" } }, "documentation":"

Summary information about the human loop.

" }, "InputContent":{ "type":"string", - "max":4194304 + "max":3145728 }, "InternalServerException":{ "type":"structure", "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

Your request could not be processed.

", + "documentation":"

We couldn't process your request because of an issue with the server. Try again later.

", "error":{"httpStatusCode":500}, "exception":true }, @@ -318,19 +319,19 @@ }, "SortOrder":{ "shape":"SortOrder", - "documentation":"

An optional value that specifies whether you want the results sorted in Ascending or Descending order.

", + "documentation":"

Optional. The order for displaying results. Valid values: Ascending and Descending.

", "location":"querystring", "locationName":"SortOrder" }, "NextToken":{ "shape":"NextToken", - "documentation":"

A token to resume pagination.

", + "documentation":"

A token to display the next page of results.

", "location":"querystring", "locationName":"NextToken" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

", + "documentation":"

The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken is returned in the output. You can use this token to display the next page of results.

", "box":true, "location":"querystring", "locationName":"MaxResults" @@ -343,11 +344,11 @@ "members":{ "HumanLoopSummaries":{ "shape":"HumanLoopSummaries", - "documentation":"

An array of objects containing information about the human loops.

" + "documentation":"

An array of objects that contain information about the human loops.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

A token to resume pagination.

" + "documentation":"

A token to display the next page of results.

" } } }, @@ -366,7 +367,7 @@ "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

We were unable to find the requested resource.

", + "documentation":"

We couldn't find the requested resource.

", "error":{"httpStatusCode":404}, "exception":true }, @@ -375,7 +376,7 @@ "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

You have exceeded your service quota. To perform the requested action, remove some of the relevant resources, or request a service quota increase.

", + "documentation":"

You exceeded your service quota. Delete some resources or request an increase in your service quota.

", "error":{"httpStatusCode":402}, "exception":true }, @@ -400,15 +401,15 @@ }, "FlowDefinitionArn":{ "shape":"FlowDefinitionArn", - "documentation":"

The Amazon Resource Name (ARN) of the flow definition.

" + "documentation":"

The Amazon Resource Name (ARN) of the flow definition associated with this human loop.

" }, "HumanLoopInput":{ "shape":"HumanLoopInput", - "documentation":"

An object containing information about the human loop.

" + "documentation":"

An object that contains information about the human loop.

" }, "DataAttributes":{ "shape":"HumanLoopDataAttributes", - "documentation":"

Attributes of the data specified by the customer.

" + "documentation":"

Attributes of the specified data. Use DataAttributes to specify if your data is free of personally identifiable information and/or free of adult content.

" } } }, @@ -427,7 +428,7 @@ "members":{ "HumanLoopName":{ "shape":"HumanLoopName", - "documentation":"

The name of the human loop you want to stop.

" + "documentation":"

The name of the human loop that you want to stop.

" } } }, @@ -442,7 +443,7 @@ "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

Your request has exceeded the allowed amount of requests.

", + "documentation":"

You exceeded the maximum number of requests.

", "error":{"httpStatusCode":429}, "exception":true }, @@ -452,10 +453,10 @@ "members":{ "Message":{"shape":"FailureReason"} }, - "documentation":"

Your request was not valid. Check the syntax and try again.

", + "documentation":"

The request isn't valid. Check the syntax and try again.

", "error":{"httpStatusCode":400}, "exception":true } }, - "documentation":"

Amazon Augmented AI (Augmented AI) (Preview) is a service that adds human judgment to any machine learning application. Human reviewers can take over when an AI application can't evaluate data with a high degree of confidence.

From fraudulent bank transaction identification to document processing to image analysis, machine learning models can be trained to make decisions as well as or better than a human. Nevertheless, some decisions require contextual interpretation, such as when you need to decide whether an image is appropriate for a given audience. Content moderation guidelines are nuanced and highly dependent on context, and they vary between countries. When trying to apply AI in these situations, you can be forced to choose between \"ML only\" systems with unacceptably high error rates or \"human only\" systems that are expensive and difficult to scale, and that slow down decision making.

This API reference includes information about API actions and data types you can use to interact with Augmented AI programmatically.

You can create a flow definition against the Augmented AI API. Provide the Amazon Resource Name (ARN) of a flow definition to integrate AI service APIs, such as Textract.AnalyzeDocument and Rekognition.DetectModerationLabels. These AI services, in turn, invoke the StartHumanLoop API, which evaluates conditions under which humans will be invoked. If humans are required, Augmented AI creates a human loop. Results of human work are available asynchronously in Amazon Simple Storage Service (Amazon S3). You can use Amazon CloudWatch Events to detect human work results.

You can find additional Augmented AI API documentation in the following reference guides: Amazon Rekognition, Amazon SageMaker, and Amazon Textract.

" + "documentation":"

Amazon Augmented AI is in preview release and is subject to change. We do not recommend using this product in production environments.

Amazon Augmented AI (Amazon A2I) adds the benefit of human judgment to any machine learning application. When an AI application can't evaluate data with a high degree of confidence, human reviewers can take over. This human review is called a human review workflow. To create and start a human review workflow, you need three resources: a worker task template, a flow definition, and a human loop.

For information about these resources and prerequisites for using Amazon A2I, see Get Started with Amazon Augmented AI in the Amazon SageMaker Developer Guide.

This API reference includes information about API actions and data types that you can use to interact with Amazon A2I programmatically. Use this guide to:

  • Start a human loop with the StartHumanLoop operation when using Amazon A2I with a custom task type. To learn more about the difference between custom and built-in task types, see Use Task Types . To learn how to start a human loop using this API, see Create and Start a Human Loop for a Custom Task Type in the Amazon SageMaker Developer Guide.

  • Manage your human loops. You can list all human loops that you have created, describe individual human loops, and stop and delete human loops. To learn more, see Monitor and Manage Your Human Loop in the Amazon SageMaker Developer Guide.

Amazon A2I integrates APIs from various AWS services to create and start human review workflows for those services. To learn how Amazon A2I uses these APIs, see Use APIs in Amazon A2I in the Amazon SageMaker Developer Guide.

" } diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index a280baad4359..1d73cc2d2c34 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json b/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json index 41224d5ffce4..a12c74b63ca6 100644 --- a/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/service-2.json @@ -32,7 +32,7 @@ "shapes":{ "BodyBlob":{ "type":"blob", - "max":5242880, + "max":6291456, "sensitive":true }, "CustomAttributesHeader":{ @@ -77,7 +77,7 @@ }, "Body":{ "shape":"BodyBlob", - "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

For information about the format of the request body, see Common Data Formats—Inference.

" + "documentation":"

Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

For information about the format of the request body, see Common Data Formats-Inference.

" }, "ContentType":{ "shape":"Header", @@ -99,9 +99,15 @@ }, "TargetModel":{ "shape":"TargetModelHeader", - "documentation":"

Specifies the model to be requested for an inference when invoking a multi-model endpoint.

", + "documentation":"

The model to request for inference when invoking a multi-model endpoint.

", "location":"header", "locationName":"X-Amzn-SageMaker-Target-Model" + }, + "TargetVariant":{ + "shape":"TargetVariantHeader", + "documentation":"

Specify the production variant to send the inference request to when invoking an endpoint that is running two or more variants. Note that this parameter overrides the default behavior for the endpoint, which is to distribute the invocation traffic based on the variant weights.

", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Variant" } }, "payload":"Body" @@ -112,7 +118,7 @@ "members":{ "Body":{ "shape":"BodyBlob", - "documentation":"

Includes the inference provided by the model.

For information about the format of the response body, see Common Data Formats—Inference.

" + "documentation":"

Includes the inference provided by the model.

For information about the format of the response body, see Common Data Formats-Inference.

" }, "ContentType":{ "shape":"Header", @@ -179,6 +185,11 @@ "min":1, "pattern":"\\A\\S[\\p{Print}]*\\z" }, + "TargetVariantHeader":{ + "type":"string", + "max":63, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, "ValidationError":{ "type":"structure", "members":{ diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 38e1a10271e4..6e628a8b86ca 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index a06a19c5d2bf..59cd1847442d 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/schemas/src/main/resources/codegen-resources/service-2.json b/services/schemas/src/main/resources/codegen-resources/service-2.json index 89dbc62272d3..7a1f044a8265 100644 --- a/services/schemas/src/main/resources/codegen-resources/service-2.json +++ b/services/schemas/src/main/resources/codegen-resources/service-2.json @@ -22,26 +22,33 @@ "shape": "CreateDiscovererRequest" }, "output": { - "shape": "CreateDiscovererResponse" + "shape": "CreateDiscovererResponse", + "documentation": "

201 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "ConflictException" + "shape": "ConflictException", + "documentation": "

409 response

" } ], "documentation": "

Creates a discoverer.

" @@ -57,26 +64,33 @@ "shape": "CreateRegistryRequest" }, "output": { - "shape": "CreateRegistryResponse" + "shape": "CreateRegistryResponse", + "documentation": "

201 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "ConflictException" + "shape": "ConflictException", + "documentation": "

409 response

" } ], "documentation": "

Creates a registry.

" @@ -92,23 +106,28 @@ "shape": "CreateSchemaRequest" }, "output": { - "shape": "CreateSchemaResponse" + "shape": "CreateSchemaResponse", + "documentation": "

201 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], - "documentation": "

Creates a schema definition.

" + "documentation": "

Creates a schema definition.

Inactive schemas will be deleted after two years.

" }, "DeleteDiscoverer": { "name": "DeleteDiscoverer", @@ -122,22 +141,28 @@ }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Deletes a discoverer.

" @@ -154,26 +179,70 @@ }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Deletes a Registry.

" }, + "DeleteResourcePolicy": { + "name": "DeleteResourcePolicy", + "http": { + "method": "DELETE", + "requestUri": "/v1/policy", + "responseCode": 204 + }, + "input": { + "shape": "DeleteResourcePolicyRequest" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

400 response

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

401 response

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

500 response

" + }, + { + "shape": "ForbiddenException", + "documentation": "

403 response

" + }, + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" + } + ], + "documentation": "

Delete the resource-based policy attached to the specified registry.

" + }, "DeleteSchema": { "name": "DeleteSchema", "http": { @@ -186,22 +255,28 @@ }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Delete a schema definition.

" @@ -218,22 +293,28 @@ }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Delete the schema version definition.

" @@ -249,26 +330,33 @@ "shape": "DescribeCodeBindingRequest" }, "output": { - "shape": "DescribeCodeBindingResponse" + "shape": "DescribeCodeBindingResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "TooManyRequestsException" + "shape": "TooManyRequestsException", + "documentation": "

429 response

" } ], "documentation": "

Describe the code binding URI.

" @@ -284,26 +372,33 @@ "shape": "DescribeDiscovererRequest" }, "output": { - "shape": "DescribeDiscovererResponse" + "shape": "DescribeDiscovererResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Describes the discoverer.

" @@ -319,26 +414,33 @@ "shape": "DescribeRegistryRequest" }, "output": { - "shape": "DescribeRegistryResponse" + "shape": "DescribeRegistryResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Describes the registry.

" @@ -354,26 +456,33 @@ "shape": "DescribeSchemaRequest" }, "output": { - "shape": "DescribeSchemaResponse" + "shape": "DescribeSchemaResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Retrieve the schema definition.

" @@ -389,26 +498,33 @@ "shape": "GetCodeBindingSourceRequest" }, "output": { - "shape": "GetCodeBindingSourceResponse" + "shape": "GetCodeBindingSourceResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "TooManyRequestsException" + "shape": "TooManyRequestsException", + "documentation": "

429 response

" } ], "documentation": "

Get the code binding source URI.

" @@ -424,27 +540,75 @@ "shape": "GetDiscoveredSchemaRequest" }, "output": { - "shape": "GetDiscoveredSchemaResponse" + "shape": "GetDiscoveredSchemaResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

Get the discovered schema that was generated based on sampled events.

" }, + "GetResourcePolicy": { + "name": "GetResourcePolicy", + "http": { + "method": "GET", + "requestUri": "/v1/policy", + "responseCode": 200 + }, + "input": { + "shape": "GetResourcePolicyRequest" + }, + "output": { + "shape": "GetResourcePolicyResponse", + "documentation": "

Get Resource-Based Policy Response

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

400 response

" + }, + { + "shape": "UnauthorizedException", + "documentation": "

401 response

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

500 response

" + }, + { + "shape": "ForbiddenException", + "documentation": "

403 response

" + }, + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" + } + ], + "documentation": "

Retrieves the resource-based policy attached to a given registry.

" + }, "ListDiscoverers": { "name": "ListDiscoverers", "http": { @@ -456,23 +620,29 @@ "shape": "ListDiscoverersRequest" }, "output": { - "shape": "ListDiscoverersResponse" + "shape": "ListDiscoverersResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

List the discoverers.

" @@ -488,23 +658,29 @@ "shape": "ListRegistriesRequest" }, "output": { - "shape": "ListRegistriesResponse" + "shape": "ListRegistriesResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

List the registries.

" @@ -520,26 +696,33 @@ "shape": "ListSchemaVersionsRequest" }, "output": { - "shape": "ListSchemaVersionsResponse" + "shape": "ListSchemaVersionsResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Provides a list of the schema versions and related information.

" @@ -555,23 +738,29 @@ "shape": "ListSchemasRequest" }, "output": { - "shape": "ListSchemasResponse" + "shape": "ListSchemasResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

List the schemas.

" @@ -587,93 +776,120 @@ "shape": "ListTagsForResourceRequest" }, "output": { - "shape": "ListTagsForResourceResponse" + "shape": "ListTagsForResourceResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

Get tags for resource.

" }, - "LockServiceLinkedRole": { - "name": "LockServiceLinkedRole", + "PutCodeBinding": { + "name": "PutCodeBinding", "http": { "method": "POST", - "requestUri": "/slr-deletion/lock", - "responseCode": 200 + "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}", + "responseCode": 202 }, "input": { - "shape": "LockServiceLinkedRoleRequest" + "shape": "PutCodeBindingRequest" }, "output": { - "shape": "LockServiceLinkedRoleResponse" + "shape": "PutCodeBindingResponse", + "documentation": "

202 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "GoneException", + "documentation": "

410 response

" + }, + { + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "BadRequestException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "UnauthorizedException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "InternalServerErrorException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "ForbiddenException" + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" } ], - "internal": true + "documentation": "

Put code binding URI.

" }, - "PutCodeBinding": { - "name": "PutCodeBinding", + "PutResourcePolicy": { + "name": "PutResourcePolicy", "http": { - "method": "POST", - "requestUri": "/v1/registries/name/{registryName}/schemas/name/{schemaName}/language/{language}", - "responseCode": 202 + "method": "PUT", + "requestUri": "/v1/policy", + "responseCode": 200 }, "input": { - "shape": "PutCodeBindingRequest" + "shape": "PutResourcePolicyRequest" }, "output": { - "shape": "PutCodeBindingResponse" + "shape": "PutResourcePolicyResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "GoneException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "BadRequestException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "UnauthorizedException" + "shape": "PreconditionFailedException", + "documentation": "

412 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "TooManyRequestsException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], - "documentation": "

Put code binding URI

" + "documentation": "

Update the resource-based policy attached to the specified registry.

" }, "SearchSchemas": { "name": "SearchSchemas", @@ -686,23 +902,29 @@ "shape": "SearchSchemasRequest" }, "output": { - "shape": "SearchSchemasResponse" + "shape": "SearchSchemasResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

Search the schemas

" @@ -718,26 +940,33 @@ "shape": "StartDiscovererRequest" }, "output": { - "shape": "StartDiscovererResponse" + "shape": "StartDiscovererResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Starts the discoverer

" @@ -753,26 +982,33 @@ "shape": "StopDiscovererRequest" }, "output": { - "shape": "StopDiscovererResponse" + "shape": "StopDiscovererResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Stops the discoverer

" @@ -789,52 +1025,24 @@ }, "errors": [ { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

Add tags to a resource.

" }, - "UnlockServiceLinkedRole": { - "name": "UnlockServiceLinkedRole", - "http": { - "method": "POST", - "requestUri": "/slr-deletion/unlock", - "responseCode": 200 - }, - "input": { - "shape": "UnlockServiceLinkedRoleRequest" - }, - "output": { - "shape": "UnlockServiceLinkedRoleResponse" - }, - "errors": [ - { - "shape": "ServiceUnavailableException" - }, - { - "shape": "BadRequestException" - }, - { - "shape": "UnauthorizedException" - }, - { - "shape": "InternalServerErrorException" - }, - { - "shape": "ForbiddenException" - } - ], - "internal": true - }, "UntagResource": { "name": "UntagResource", "http": { @@ -847,16 +1055,20 @@ }, "errors": [ { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" } ], "documentation": "

Removes tags from a resource.

" @@ -872,26 +1084,33 @@ "shape": "UpdateDiscovererRequest" }, "output": { - "shape": "UpdateDiscovererResponse" + "shape": "UpdateDiscovererResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Updates the discoverer

" @@ -907,26 +1126,33 @@ "shape": "UpdateRegistryRequest" }, "output": { - "shape": "UpdateRegistryResponse" + "shape": "UpdateRegistryResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "UnauthorizedException" + "shape": "UnauthorizedException", + "documentation": "

401 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], "documentation": "

Updates a registry.

" @@ -942,26 +1168,32 @@ "shape": "UpdateSchemaRequest" }, "output": { - "shape": "UpdateSchemaResponse" + "shape": "UpdateSchemaResponse", + "documentation": "

200 response

" }, "errors": [ { - "shape": "BadRequestException" + "shape": "BadRequestException", + "documentation": "

400 response

" }, { - "shape": "InternalServerErrorException" + "shape": "InternalServerErrorException", + "documentation": "

500 response

" }, { - "shape": "ForbiddenException" + "shape": "ForbiddenException", + "documentation": "

403 response

" }, { - "shape": "NotFoundException" + "shape": "NotFoundException", + "documentation": "

404 response

" }, { - "shape": "ServiceUnavailableException" + "shape": "ServiceUnavailableException", + "documentation": "

503 response

" } ], - "documentation": "

Updates the schema definition

" + "documentation": "

Updates the schema definition

Inactive schemas will be deleted after two years.

" } }, "shapes": { @@ -1074,6 +1306,7 @@ "documentation": "

Tags associated with the resource.

" } }, + "documentation": "", "required": [ "SourceArn" ] @@ -1132,7 +1365,8 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "Tags": { "shape": "Tags", @@ -1170,7 +1404,8 @@ "type": "structure", "members": { "Content": { - "shape": "__stringMin1Max100000" + "shape": "__stringMin1Max100000", + "documentation": "

The source of the schema definition.

" }, "Description": { "shape": "__stringMin0Max256", @@ -1182,7 +1417,8 @@ "documentation": "

Tags associated with the schema.

" }, "Type": { - "shape": "Type" + "shape": "Type", + "documentation": "

The type of schema.

" } }, "required": [ @@ -1194,7 +1430,8 @@ "type": "structure", "members": { "Content": { - "shape": "__stringMin1Max100000" + "shape": "__stringMin1Max100000", + "documentation": "

The source of the schema definition.

" }, "Description": { "shape": "__stringMin0Max256", @@ -1203,12 +1440,14 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "Tags": { "shape": "Tags", @@ -1216,7 +1455,8 @@ "documentation": "

Tags associated with the schema.

" }, "Type": { - "shape": "Type" + "shape": "Type", + "documentation": "

The type of schema.

" } }, "required": [ @@ -1269,7 +1509,8 @@ "DiscovererId": { "shape": "__string", "location": "uri", - "locationName": "discovererId" + "locationName": "discovererId", + "documentation": "

The ID of the discoverer.

" } }, "required": [ @@ -1282,25 +1523,39 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" } }, "required": [ "RegistryName" ] }, + "DeleteResourcePolicyRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

The name of the registry.

" + } + } + }, "DeleteSchemaRequest": { "type": "structure", "members": { "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" } }, "required": [ @@ -1314,17 +1569,20 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "SchemaVersion": { "shape": "__string", "location": "uri", - "locationName": "schemaVersion" + "locationName": "schemaVersion", + "documentation": "The version number of the schema" } }, "required": [ @@ -1339,22 +1597,26 @@ "Language": { "shape": "__string", "location": "uri", - "locationName": "language" + "locationName": "language", + "documentation": "

The language of the code binding.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "SchemaVersion": { "shape": "__string", "location": "querystring", - "locationName": "schemaVersion" + "locationName": "schemaVersion", + "documentation": "

Specifying this limits the results to only this schema version.

" } }, "required": [ @@ -1390,7 +1652,8 @@ "DiscovererId": { "shape": "__string", "location": "uri", - "locationName": "discovererId" + "locationName": "discovererId", + "documentation": "

The ID of the discoverer.

" } }, "required": [ @@ -1433,7 +1696,8 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" } }, "required": [ @@ -1466,7 +1730,8 @@ "type": "structure", "members": { "Content": { - "shape": "__string" + "shape": "__string", + "documentation": "

The source of the schema definition.

" }, "Description": { "shape": "__string", @@ -1509,17 +1774,20 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "SchemaVersion": { "shape": "__string", "location": "querystring", - "locationName": "schemaVersion" + "locationName": "schemaVersion", + "documentation": "

Specifying this limits the results to only this schema version.

" } }, "required": [ @@ -1531,7 +1799,8 @@ "type": "structure", "members": { "Content": { - "shape": "__string" + "shape": "__string", + "documentation": "

The source of the schema definition.

" }, "Description": { "shape": "__string", @@ -1634,7 +1903,8 @@ "documentation": "

The ARN of the event bus.

" }, "State": { - "shape": "DiscovererState" + "shape": "DiscovererState", + "documentation": "

The state of the discoverer.

" }, "Tags": { "shape": "Tags", @@ -1690,22 +1960,26 @@ "Language": { "shape": "__string", "location": "uri", - "locationName": "language" + "locationName": "language", + "documentation": "

The language of the code binding.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "SchemaVersion": { "shape": "__string", "location": "querystring", - "locationName": "schemaVersion" + "locationName": "schemaVersion", + "documentation": "

Specifying this limits the results to only this schema version.

" } }, "required": [ @@ -1728,7 +2002,7 @@ "members": { "Events": { "shape": "__listOfGetDiscoveredSchemaVersionItemInput", - "documentation": "

An array of strings that

" + "documentation": "

An array of strings where each string is a JSON event. These are the events that were used to generate the schema. The array includes a single type of event and has a maximum size of 10 events.

" }, "Type": { "shape": "Type", @@ -1744,16 +2018,18 @@ "type": "structure", "members": { "Content": { - "shape": "__string" + "shape": "__string", + "documentation": "

The source of the schema definition.

" } - } + }, + "documentation": "

" }, "GetDiscoveredSchemaRequest": { "type": "structure", "members": { "Events": { "shape": "__listOfGetDiscoveredSchemaVersionItemInput", - "documentation": "

An array of strings that

" + "documentation": "

An array of strings where each string is a JSON event. These are the events that were used to generate the schema. The array includes a single type of event and has a maximum size of 10 events.

" }, "Type": { "shape": "Type", @@ -1769,7 +2045,8 @@ "type": "structure", "members": { "Content": { - "shape": "__string" + "shape": "__string", + "documentation": "

The source of the schema definition.

" } } }, @@ -1778,6 +2055,46 @@ "min": 1, "max": 100000 }, + "GetResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID.

" + } + }, + "documentation": "

Information about the policy.

" + }, + "GetResourcePolicyRequest": { + "type": "structure", + "members": { + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

The name of the registry.

" + } + } + }, + "GetResourcePolicyResponse": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID.

" + } + } + }, "GoneException": { "type": "structure", "members": { @@ -1844,7 +2161,8 @@ "DiscovererIdPrefix": { "shape": "__string", "location": "querystring", - "locationName": "discovererIdPrefix" + "locationName": "discovererIdPrefix", + "documentation": "

Specifying this limits the results to only those discoverer IDs that start with the specified prefix.

" }, "Limit": { "shape": "__integer", @@ -1854,12 +2172,14 @@ "NextToken": { "shape": "__string", "location": "querystring", - "locationName": "nextToken" + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

" }, "SourceArnPrefix": { "shape": "__string", "location": "querystring", - "locationName": "sourceArnPrefix" + "locationName": "sourceArnPrefix", + "documentation": "

Specifying this limits the results to only those ARNs that start with the specified prefix.

" } } }, @@ -1901,17 +2221,20 @@ "NextToken": { "shape": "__string", "location": "querystring", - "locationName": "nextToken" + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

" }, "RegistryNamePrefix": { "shape": "__string", "location": "querystring", - "locationName": "registryNamePrefix" + "locationName": "registryNamePrefix", + "documentation": "

Specifying this limits the results to only those registry names that start with the specified prefix.

" }, "Scope": { "shape": "__string", "location": "querystring", - "locationName": "scope" + "locationName": "scope", + "documentation": "

Can be set to Local or AWS to limit responses to your custom registries, or the ones provided by AWS.

" } } }, @@ -1952,17 +2275,20 @@ "NextToken": { "shape": "__string", "location": "querystring", - "locationName": "nextToken" + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" } }, "required": [ @@ -2007,17 +2333,20 @@ "NextToken": { "shape": "__string", "location": "querystring", - "locationName": "nextToken" + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaNamePrefix": { "shape": "__string", "location": "querystring", - "locationName": "schemaNamePrefix" + "locationName": "schemaNamePrefix", + "documentation": "

Specifying this limits the results to only those schema names that start with the specified prefix.

" } }, "required": [ @@ -2037,13 +2366,23 @@ } } }, + "ListTagsForResourceOutput": { + "type": "structure", + "members": { + "Tags": { + "shape": "Tags", + "locationName": "tags" + } + } + }, "ListTagsForResourceRequest": { "type": "structure", "members": { "ResourceArn": { "shape": "__string", "location": "uri", - "locationName": "resource-arn" + "locationName": "resource-arn", + "documentation": "

The ARN of the resource.

" } }, "required": [ @@ -2054,12 +2393,10 @@ "type": "structure", "members": { "Tags": { - "shape": "Tags" + "shape": "Tags", + "locationName": "tags" } - }, - "required": [ - "Tags" - ] + } }, "LockServiceLinkedRoleInput": { "type": "structure", @@ -2100,6 +2437,7 @@ "shape": "__integerMin1Max29000" } }, + "documentation": "", "required": [ "Timeout", "RoleArn" @@ -2140,28 +2478,53 @@ "httpStatusCode": 404 } }, + "PreconditionFailedException": { + "type": "structure", + "members": { + "Code": { + "shape": "__string", + "documentation": "

The error code.

" + }, + "Message": { + "shape": "__string", + "documentation": "

The message string of the error output.

" + } + }, + "required": [ + "Message", + "Code" + ], + "exception": true, + "error": { + "httpStatusCode": 412 + } + }, "PutCodeBindingRequest": { "type": "structure", "members": { "Language": { "shape": "__string", "location": "uri", - "locationName": "language" + "locationName": "language", + "documentation": "

The language of the code binding.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "SchemaVersion": { "shape": "__string", "location": "querystring", - "locationName": "schemaVersion" + "locationName": "schemaVersion", + "documentation": "

Specifying this limits the results to only this schema version.

" } }, "required": [ @@ -2191,6 +2554,77 @@ } } }, + "PutResourcePolicyInput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID of the policy.

" + } + }, + "documentation": "

Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

", + "required": [ + "Policy" + ] + }, + "PutResourcePolicyOutput": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID of the policy.

" + } + }, + "documentation": "

The resource-based policy.

" + }, + "PutResourcePolicyRequest": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RegistryName": { + "shape": "__string", + "location": "querystring", + "locationName": "registryName", + "documentation": "

The name of the registry.

" + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID of the policy.

" + } + }, + "documentation": "

The name of the policy.

", + "required": [ + "Policy" + ] + }, + "PutResourcePolicyResponse": { + "type": "structure", + "members": { + "Policy": { + "shape": "__string", + "documentation": "

The resource-based policy.

", + "jsonvalue": true + }, + "RevisionId": { + "shape": "__string", + "documentation": "

The revision ID of the policy.

" + } + } + }, "RegistryOutput": { "type": "structure", "members": { @@ -2337,7 +2771,8 @@ "type": "structure", "members": { "CreatedDate": { - "shape": "__timestampIso8601" + "shape": "__timestampIso8601", + "documentation": "

The date the schema version was created.

" }, "SchemaVersion": { "shape": "__string", @@ -2364,7 +2799,8 @@ "Keywords": { "shape": "__string", "location": "querystring", - "locationName": "keywords" + "locationName": "keywords", + "documentation": "

Specifying this limits the results to only schemas that include the provided keywords.

" }, "Limit": { "shape": "__integer", @@ -2374,12 +2810,14 @@ "NextToken": { "shape": "__string", "location": "querystring", - "locationName": "nextToken" + "locationName": "nextToken", + "documentation": "

The token that specifies the next page of results to return. To request the first page, leave NextToken empty. The token will expire in 24 hours, and cannot be shared with other accounts.

" }, "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" } }, "required": [ @@ -2427,7 +2865,8 @@ "DiscovererId": { "shape": "__string", "location": "uri", - "locationName": "discovererId" + "locationName": "discovererId", + "documentation": "

The ID of the discoverer.

" } }, "required": [ @@ -2453,7 +2892,8 @@ "DiscovererId": { "shape": "__string", "location": "uri", - "locationName": "discovererId" + "locationName": "discovererId", + "documentation": "

The ID of the discoverer.

" } }, "required": [ @@ -2478,7 +2918,8 @@ "members": { "Tags": { "shape": "Tags", - "locationName": "tags" + "locationName": "tags", + "documentation": "

Tags associated with the resource.

" } }, "required": [ @@ -2491,13 +2932,16 @@ "ResourceArn": { "shape": "__string", "location": "uri", - "locationName": "resource-arn" + "locationName": "resource-arn", + "documentation": "

The ARN of the resource.

" }, "Tags": { "shape": "Tags", - "locationName": "tags" + "locationName": "tags", + "documentation": "

Tags associated with the resource.

" } }, + "documentation": "

", "required": [ "ResourceArn", "Tags" @@ -2593,12 +3037,14 @@ "ResourceArn": { "shape": "__string", "location": "uri", - "locationName": "resource-arn" + "locationName": "resource-arn", + "documentation": "

The ARN of the resource.

" }, "TagKeys": { "shape": "__listOf__string", "location": "querystring", - "locationName": "tagKeys" + "locationName": "tagKeys", + "documentation": "

Keys of key-value pairs.

" } }, "required": [ @@ -2625,7 +3071,8 @@ "DiscovererId": { "shape": "__string", "location": "uri", - "locationName": "discovererId" + "locationName": "discovererId", + "documentation": "

The ID of the discoverer.

" } }, "required": [ @@ -2681,9 +3128,11 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" } }, + "documentation": "

Updates the registry.

", "required": [ "RegistryName" ] @@ -2751,12 +3200,14 @@ "RegistryName": { "shape": "__string", "location": "uri", - "locationName": "registryName" + "locationName": "registryName", + "documentation": "

The name of the registry.

" }, "SchemaName": { "shape": "__string", "location": "uri", - "locationName": "schemaName" + "locationName": "schemaName", + "documentation": "

The name of the schema.

" }, "Type": { "shape": "Type", @@ -2912,5 +3363,5 @@ "type": "blob" } }, - "documentation": "

AWS EventBridge Schemas

" + "documentation": "

Amazon EventBridge Schema Registry

" } diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 0544a1dd5e6a..5d47f639f566 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json index b39a887620e1..62311e41b522 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Disables automatic scheduled rotation and cancels the rotation of a secret if one is currently in progress.

To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. This will immediately rotate your secret and then enable the automatic schedule.

If you cancel a rotation that is in progress, it can leave the VersionStage labels in an unexpected state. Depending on what step of the rotation was in progress, you might need to remove the staging label AWSPENDING from the partially created version, specified by the VersionId response value. You should also evaluate the partially rotated new version to see if it should be deleted, which you can do by removing all staging labels from the new version's VersionStage field.

To successfully start a rotation, the staging label AWSPENDING must be in one of the following states:

  • Not be attached to any version at all

  • Attached to the same version as the staging label AWSCURRENT

If the staging label AWSPENDING is attached to a different version than the version with AWSCURRENT then the attempt to rotate fails.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CancelRotateSecret

Related operations

  • To configure rotation for a secret or to manually trigger a rotation, use RotateSecret.

  • To get the rotation configuration details for a secret, use DescribeSecret.

  • To list all of the currently available secrets, use ListSecrets.

  • To list all of the versions currently associated with a secret, use ListSecretVersionIds.

" + "documentation":"

Disables automatic scheduled rotation and cancels the rotation of a secret if currently in progress.

To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays set to a value greater than 0. This immediately rotates your secret and then enables the automatic schedule.

If you cancel a rotation while in progress, it can leave the VersionStage labels in an unexpected state. Depending on the step of the rotation in progress, you might need to remove the staging label AWSPENDING from the partially created version, specified by the VersionId response value. You should also evaluate the partially rotated new version to see if it should be deleted, which you can do by removing all staging labels from the new version VersionStage field.

To successfully start a rotation, the staging label AWSPENDING must be in one of the following states:

  • Not attached to any version at all

  • Attached to the same version as the staging label AWSCURRENT

If the staging label AWSPENDING is attached to a different version than the version with AWSCURRENT, then the attempt to rotate fails.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CancelRotateSecret

Related operations

  • To configure rotation for a secret or to manually trigger a rotation, use RotateSecret.

  • To get the rotation configuration details for a secret, use DescribeSecret.

  • To list all of the currently available secrets, use ListSecrets.

  • To list all of the versions currently associated with a secret, use ListSecretVersionIds.

" }, "CreateSecret":{ "name":"CreateSecret", @@ -48,7 +48,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and are not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify a AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if an Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

  • secretsmanager:TagResource - needed only if you include the Tags parameter.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + "documentation":"

Creates a new secret. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

Secrets Manager stores the encrypted secret data in one of a collection of \"versions\" associated with the secret. Each version contains a copy of the encrypted secret data. Each version is associated with one or more \"staging labels\" that identify where the version is in the rotation cycle. The SecretVersionsToStages field of the secret contains the mapping of staging labels to the active versions of the secret. Versions without a staging label are considered deprecated and not included in the list.

You provide the secret data to be encrypted by putting text in either the SecretString parameter or binary data in the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager also creates an initial secret version and automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:CreateSecret

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account default AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account default AWS managed CMK for Secrets Manager.

  • secretsmanager:TagResource - needed only if you include the Tags parameter.

Related operations

  • To delete a secret, use DeleteSecret.

  • To modify an existing secret, use UpdateSecret.

  • To create a new version of a secret, use PutSecretValue.

  • To retrieve the encrypted secure string and secure binary values, use GetSecretValue.

  • To retrieve all other details for a secret, use DescribeSecret. This does not include the encrypted secure string and secure binary values.

  • To retrieve the list of secret versions associated with the current secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -63,7 +63,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Deletes the resource-based permission policy that's attached to the secret.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DeleteResourcePolicy

Related operations

  • To attach a resource policy to a secret, use PutResourcePolicy.

  • To retrieve the current resource-based policy that's attached to a secret, use GetResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" + "documentation":"

Deletes the resource-based permission policy attached to the secret.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DeleteResourcePolicy

Related operations

  • To attach a resource policy to a secret, use PutResourcePolicy.

  • To retrieve the current resource-based policy that's attached to a secret, use GetResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" }, "DeleteSecret":{ "name":"DeleteSecret", @@ -93,7 +93,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Retrieves the details of a secret. It does not include the encrypted fields. Only those fields that are populated with a value are returned in the response.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DescribeSecret

Related operations

" + "documentation":"

Retrieves the details of a secret. It does not include the encrypted fields. Secrets Manager only returns fields populated with a value in the response.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:DescribeSecret

Related operations

" }, "GetRandomPassword":{ "name":"GetRandomPassword", @@ -123,7 +123,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Retrieves the JSON text of the resource-based policy document that's attached to the specified secret. The JSON request string input and response output are shown formatted with white space and line breaks for better readability. Submit your input as a single line JSON string.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetResourcePolicy

Related operations

" + "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the specified secret. The JSON request string input and response output displays formatted code with white space and line breaks for better readability. Submit your input as a single line JSON string.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:GetResourcePolicy

Related operations

" }, "GetSecretValue":{ "name":"GetSecretValue", @@ -155,7 +155,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Lists all of the versions attached to the specified secret. The output does not include the SecretString or SecretBinary fields. By default, the list includes only versions that have at least one staging label in VersionStage attached.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when there are more results available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecretVersionIds

Related operations

" + "documentation":"

Lists all of the versions attached to the specified secret. The output does not include the SecretString or SecretBinary fields. By default, the list includes only versions that have at least one staging label in VersionStage attached.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when more results become available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecretVersionIds

Related operations

" }, "ListSecrets":{ "name":"ListSecrets", @@ -170,7 +170,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Lists all of the secrets that are stored by Secrets Manager in the AWS account. To list the versions currently stored for a specific secret, use ListSecretVersionIds. The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when there are more results available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecrets

Related operations

" + "documentation":"

Lists all of the secrets that are stored by Secrets Manager in the AWS account. To list the versions currently stored for a specific secret, use ListSecretVersionIds. The encrypted fields SecretString and SecretBinary are not included in the output. To get that information, call the GetSecretValue operation.

Always check the NextToken response parameter when calling any of the List* operations. These operations can occasionally return an empty or shorter than expected list of results even when more results become available. When this happens, the NextToken response parameter contains a value to pass to the next call to the same API to request the next part of the list.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:ListSecrets

Related operations

" }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -185,9 +185,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"}, - {"shape":"InvalidRequestException"} + {"shape":"InvalidRequestException"}, + {"shape":"PublicPolicyException"} ], - "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

  • To retrieve the resource policy that's attached to a secret, use GetResourcePolicy.

  • To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy.

  • To list all of the currently available secrets, use ListSecrets.

" + "documentation":"

Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's Resources element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see Using Resource-Based Policies for AWS Secrets Manager. For the complete description of the AWS policy syntax and grammar, see IAM JSON Policy Reference in the IAM User Guide.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutResourcePolicy

Related operations

" }, "PutSecretValue":{ "name":"PutSecretValue", @@ -206,7 +207,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a VersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Stores a new encrypted secret value in the specified secret. To do this, the operation creates a new version and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. You can also specify the staging labels that are initially attached to the new version.

The Secrets Manager console uses only the SecretString field. To add binary data to a secret with the SecretBinary field you must use the AWS CLI or one of the AWS SDKs.

  • If this operation creates the first version for the secret then Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If another version of this secret already exists, then this operation does not automatically move any staging labels other than those that you explicitly specify in the VersionStages parameter.

  • If this operation moves the staging label AWSCURRENT from another version to this version (because you included it in the StagingLabels parameter) then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

  • This operation is idempotent. If a version with a VersionId with the same value as the ClientRequestToken parameter already exists and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you cannot modify an existing version; you can only create new ones.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:PutSecretValue

  • kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to encrypt the secret. You do not need this permission to use the account's default AWS managed CMK for Secrets Manager.

Related operations

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -238,7 +239,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one is complete. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If instead the AWSPENDING staging label is present but is not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" + "documentation":"

Configures and starts the asynchronous process of rotating this secret. If you include the configuration parameters, the operation sets those values for the secret and then immediately starts a rotation. If you do not include the configuration parameters, the operation starts a rotation with the values already stored in the secret. After the rotation completes, the protected service and its clients all use the new version of the secret.

This required configuration information includes the ARN of an AWS Lambda function and the time between scheduled rotations. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the protected service to match. After testing the new credentials, the function marks the new secret with the staging label AWSCURRENT so that your clients all immediately begin to use the new version. For more information about rotating secrets and how to configure a Lambda function to rotate the secrets for your protected service, see Rotating Secrets in AWS Secrets Manager in the AWS Secrets Manager User Guide.

Secrets Manager schedules the next rotation when the previous one completes. Secrets Manager schedules the date by adding the rotation interval (number of days) to the actual date of the last rotation. The service chooses the hour within that 24-hour date window randomly. The minute is also chosen somewhat randomly, but weighted towards the top of the hour and influenced by a variety of factors that help distribute load.

The rotation function must end with the versions of the secret in one of two states:

  • The AWSPENDING and AWSCURRENT staging labels are attached to the same version of the secret, or

  • The AWSPENDING staging label is not attached to any version of the secret.

If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:RotateSecret

  • lambda:InvokeFunction (on the function specified in the secret's metadata)

Related operations

" }, "TagResource":{ "name":"TagResource", @@ -253,7 +254,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:TagResource

Related operations

  • To remove one or more tags from the collection attached to a secret, use UntagResource.

  • To view the list of tags attached to a secret, use DescribeSecret.

" + "documentation":"

Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because AWS reserves it for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:TagResource

Related operations

  • To remove one or more tags from the collection attached to a secret, use UntagResource.

  • To view the list of tags attached to a secret, use DescribeSecret.

" }, "UntagResource":{ "name":"UntagResource", @@ -289,7 +290,7 @@ {"shape":"InternalServiceError"}, {"shape":"PreconditionNotMetException"} ], - "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a VersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation that needs to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS having to create the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret is in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" + "documentation":"

Modifies many of the details of the specified secret. If you include a ClientRequestToken and either SecretString or SecretBinary then it also creates a new version attached to the secret.

To modify the rotation configuration of a secret, use RotateSecret instead.

The Secrets Manager console uses only the SecretString parameter and therefore limits you to encrypting and storing only a text string. To encrypt and store binary data as part of the version of a secret, you must use either the AWS CLI or one of the AWS SDKs.

  • If a version with a VersionId with the same value as the ClientRequestToken parameter already exists, the operation results in an error. You cannot modify an existing version, you can only create a new version.

  • If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

  • If you call an operation to encrypt or decrypt the SecretString or SecretBinary for a secret in the same account as the calling user and that secret doesn't specify an AWS KMS encryption key, Secrets Manager uses the account's default AWS managed customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't already exist in your account then Secrets Manager creates it for you automatically. All users and roles in the same AWS account automatically have access to use the default CMK. Note that if a Secrets Manager API call results in AWS creating the account's AWS-managed CMK, it can result in a one-time significant delay in returning the result.

  • If the secret resides in a different AWS account from the credentials calling an API that requires encryption or decryption of the secret value then you must create and use a custom AWS KMS CMK because you can't access the default CMK for the account using credentials from a different AWS account. Store the ARN of the CMK in the secret when you create the secret or when you update it by including it in the KMSKeyId. If you call an API that must encrypt or decrypt SecretString or SecretBinary using credentials from a different account then the AWS KMS key policy must grant cross-account access to that other account's user or role for both the kms:GenerateDataKey and kms:Decrypt operations.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecret

  • kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

  • kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt the secret. You do not need this permission to use the account's AWS managed CMK for Secrets Manager.

Related operations

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -307,6 +308,23 @@ {"shape":"InternalServiceError"} ], "documentation":"

Modifies the staging labels attached to a version of a secret. Staging labels are used to track a version as it progresses through the secret rotation process. You can attach a staging label to only one version of a secret at a time. If a staging label to be added is already attached to another version, then it is moved--removed from the other version first and then attached to this one. For more information about staging labels, see Staging Labels in the AWS Secrets Manager User Guide.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels--they don't replace it.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Minimum permissions

To run this command, you must have the following permissions:

  • secretsmanager:UpdateSecretVersionStage

Related operations

  • To get the list of staging labels that are currently associated with a version of a secret, use DescribeSecret and examine the SecretVersionsToStages response value.

" + }, + "ValidateResourcePolicy":{ + "name":"ValidateResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ValidateResourcePolicyRequest"}, + "output":{"shape":"ValidateResourcePolicyResponse"}, + "errors":[ + {"shape":"MalformedPolicyDocumentException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

Validates the JSON text of the resource-based policy document attached to the specified secret. The JSON request string input and response output displays formatted code with white space and line breaks for better readability. Submit your input as a single line JSON string. A resource-based policy is optional.

" } }, "shapes":{ @@ -322,7 +340,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret for which you want to cancel a rotation request. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret to cancel a rotation request. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" } } }, @@ -339,7 +357,7 @@ }, "VersionId":{ "shape":"SecretVersionIdType", - "documentation":"

The unique identifier of the version of the secret that was created during the rotation. This version might not be complete, and should be evaluated for possible deletion. At the very least, you should remove the VersionStage value AWSPENDING to enable this version to be deleted. Failing to clean up a cancelled rotation can block you from successfully starting future rotations.

" + "documentation":"

The unique identifier of the version of the secret created during the rotation. This version might not be complete, and should be evaluated for possible deletion. At the very least, you should remove the VersionStage value AWSPENDING to enable this version to be deleted. Failing to clean up a cancelled rotation can block you from successfully starting future rotations.

" } } }, @@ -354,11 +372,11 @@ "members":{ "Name":{ "shape":"NameType", - "documentation":"

Specifies the friendly name of the new secret.

The secret name must be ASCII letters, digits, or the following characters : /_+=.@-

Don't end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. This is because Secrets Manager automatically adds a hyphen and six random characters at the end of the ARN.

" + "documentation":"

Specifies the friendly name of the new secret.

The secret name must be ASCII letters, digits, or the following characters : /_+=.@-

Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters at the end of the ARN.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret, then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are the same as those in the request, then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) If you include SecretString or SecretBinary, then an initial version is created as part of the secret, and this parameter specifies a unique identifier for the new version.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for the new version and include the value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret, then a new version of the secret is created.

  • If a version with this value already exists and the version SecretString and SecretBinary values are the same as those in the request, then the request is ignored.

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request, then the request fails because you cannot modify an existing version. Instead, use PutSecretValue to create a new version.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "Description":{ @@ -367,7 +385,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

(Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

You can specify any of the supported ways to identify an AWS KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.

If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager). If an AWS KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account's default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret is in a different account, then you must create a custom CMK and specify the ARN in this field.

" + "documentation":"

(Optional) Specifies the ARN, Key ID, or alias of the AWS KMS customer master key (CMK) to be used to encrypt the SecretString or SecretBinary values in the versions stored in this secret.

You can specify any of the supported ways to identify an AWS KMS key ID. If you need to reference a CMK in a different account, you can use only the key ARN or the alias ARN.

If you don't specify this value, then Secrets Manager defaults to using the AWS account's default CMK (the one named aws/secretsmanager). If an AWS KMS CMK with that name doesn't yet exist, then Secrets Manager creates it for you automatically the first time it needs to encrypt a version's SecretString or SecretBinary fields.

You can use the account default CMK to encrypt and decrypt only if you call this operation using credentials from the same account that owns the secret. If the secret resides in a different account, then you must create a custom CMK and specify the ARN in this field.

" }, "SecretBinary":{ "shape":"SecretBinaryType", @@ -375,11 +393,11 @@ }, "SecretString":{ "shape":"SecretStringType", - "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"username\":\"bob\"},{\"password\":\"abc123xyz456\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" + "documentation":"

(Optional) Specifies text data that you want to encrypt and store in this new version of the secret.

Either SecretString or SecretBinary must have a value, but not both. They cannot both be empty.

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that the Lambda rotation function knows how to parse.

For storing multiple values, we recommend that you use a JSON text string argument and specify key/value pairs. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

{\"username\":\"bob\",\"password\":\"abc123xyz456\"}

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

" }, "Tags":{ "shape":"TagListType", - "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" + "documentation":"

(Optional) Specifies a list of user-defined tags that are attached to the secret. Each tag is a \"Key\" and \"Value\" pair of strings. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.

  • Secrets Manager tag key names are case sensitive. A tag with the key \"ABC\" is a different tag from one with key \"abc\".

  • If you check tags in IAM policy Condition elements as part of your security strategy, then adding or removing a tag can change permissions. If the successful completion of this operation would result in you losing your permissions for this secret, then this operation is blocked and returns an Access Denied error.

This parameter requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide. For example:

[{\"Key\":\"CostCenter\",\"Value\":\"12345\"},{\"Key\":\"environment\",\"Value\":\"production\"}]

If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text.

The following basic restrictions apply to tags:

  • Maximum number of tags per secret—50

  • Maximum key length—127 Unicode characters in UTF-8

  • Maximum value length—255 Unicode characters in UTF-8

  • Tag keys and values are case sensitive.

  • Do not use the aws: prefix in your tag names or values because AWS reserves it for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.

  • If you use your tagging schema across multiple services and resources, remember other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @.

" } } }, @@ -396,7 +414,7 @@ }, "VersionId":{ "shape":"SecretVersionIdType", - "documentation":"

The unique identifier that's associated with the version of the secret you just created.

" + "documentation":"

The unique identifier associated with the version of the secret you just created.

" } } }, @@ -415,7 +433,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to delete the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" } } }, @@ -438,7 +456,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to delete. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "RecoveryWindowInDays":{ "shape":"RecoveryWindowInDaysType", @@ -478,7 +496,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier of the secret whose details you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

The identifier of the secret whose details you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" } } }, @@ -545,6 +563,11 @@ "OwningService":{ "shape":"OwningServiceType", "documentation":"

Returns the name of the service that created this secret.

" + }, + "CreatedDate":{ + "shape":"TimestampType", + "documentation":"

The date that the secret was created.

", + "box":true } } }, @@ -570,6 +593,47 @@ "ExcludeNumbersType":{"type":"boolean"}, "ExcludePunctuationType":{"type":"boolean"}, "ExcludeUppercaseType":{"type":"boolean"}, + "Filter":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"FilterNameStringType", + "documentation":"

Filters your list of secrets by a specific key.

" + }, + "Values":{ + "shape":"FilterValuesStringList", + "documentation":"

Filters your list of secrets by a specific value.

" + } + }, + "documentation":"

Allows you to filter your list of secrets.

" + }, + "FilterNameStringType":{ + "type":"string", + "enum":[ + "description", + "name", + "tag-key", + "tag-value", + "all" + ] + }, + "FilterValueStringType":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[a-zA-Z0-9 :_@\\/\\+\\=\\.\\-]+" + }, + "FilterValuesStringList":{ + "type":"list", + "member":{"shape":"FilterValueStringType"}, + "max":10, + "min":1 + }, + "FiltersListType":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":10 + }, "GetRandomPasswordRequest":{ "type":"structure", "members":{ @@ -629,7 +693,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to retrieve the attached resource-based policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" } } }, @@ -656,7 +720,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret containing the version that you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret containing the version that you want to retrieve. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "VersionId":{ "shape":"SecretVersionIdType", @@ -758,16 +822,16 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret containing the versions you want to list. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

The identifier for the secret containing the versions you want to list. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "MaxResults":{ "shape":"MaxResultsType", - "documentation":"

(Optional) Limits the number of results that you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

", + "documentation":"

(Optional) Limits the number of results you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

", "box":true }, "NextToken":{ "shape":"NextTokenType", - "documentation":"

(Optional) Use this parameter in a request if you receive a NextToken response in a previous request that indicates that there's more output available. In a subsequent call, set it to the value of the previous call's NextToken response to indicate where the output should continue from.

" + "documentation":"

(Optional) Use this parameter in a request if you receive a NextToken response in a previous request indicating there's more output available. In a subsequent call, set it to the value of the previous call NextToken response to indicate where the output should continue from.

" }, "IncludeDeprecated":{ "shape":"BooleanType", @@ -785,7 +849,7 @@ }, "NextToken":{ "shape":"NextTokenType", - "documentation":"

If present in the response, this value indicates that there's more output available than what's included in the current response. This can occur even when the response includes no values at all, such as when you ask for a filtered view of a very long list. Use this value in the NextToken request parameter in a subsequent call to the operation to continue processing and get the next part of the output. You should repeat this until the NextToken response element comes back empty (as null).

" + "documentation":"

If present in the response, this value indicates that there's more output available than included in the current response. This can occur even when the response includes no values at all, such as when you ask for a filtered view of a very long list. Use this value in the NextToken request parameter in a subsequent call to the operation to continue processing and get the next part of the output. You should repeat this until the NextToken response element comes back empty (as null).

" }, "ARN":{ "shape":"SecretARNType", @@ -802,12 +866,20 @@ "members":{ "MaxResults":{ "shape":"MaxResultsType", - "documentation":"

(Optional) Limits the number of results that you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

", + "documentation":"

(Optional) Limits the number of results you want to include in the response. If you don't include this parameter, it defaults to a value that's specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (isn't null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Secrets Manager might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

", "box":true }, "NextToken":{ "shape":"NextTokenType", - "documentation":"

(Optional) Use this parameter in a request if you receive a NextToken response in a previous request that indicates that there's more output available. In a subsequent call, set it to the value of the previous call's NextToken response to indicate where the output should continue from.

" + "documentation":"

(Optional) Use this parameter in a request if you receive a NextToken response in a previous request indicating there's more output available. In a subsequent call, set it to the value of the previous call NextToken response to indicate where the output should continue from.

" + }, + "Filters":{ + "shape":"FiltersListType", + "documentation":"

Lists the secret request filters.

" + }, + "SortOrder":{ + "shape":"SortOrderType", + "documentation":"

Lists secrets in the requested order.

" } } }, @@ -820,7 +892,7 @@ }, "NextToken":{ "shape":"NextTokenType", - "documentation":"

If present in the response, this value indicates that there's more output available than what's included in the current response. This can occur even when the response includes no values at all, such as when you ask for a filtered view of a very long list. Use this value in the NextToken request parameter in a subsequent call to the operation to continue processing and get the next part of the output. You should repeat this until the NextToken response element comes back empty (as null).

" + "documentation":"

If present in the response, this value indicates that there's more output available than included in the current response. This can occur even when the response includes no values at all, such as when you ask for a filtered view of a very long list. Use this value in the NextToken request parameter in a subsequent call to the operation to continue processing and get the next part of the output. You should repeat this until the NextToken response element comes back empty (as null).

" } } }, @@ -870,6 +942,14 @@ "documentation":"

The request failed because you did not complete all the prerequisite steps.

", "exception":true }, + "PublicPolicyException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource policy did not prevent broad access to the secret.

", + "exception":true + }, "PutResourcePolicyRequest":{ "type":"structure", "required":[ @@ -879,11 +959,16 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ResourcePolicy":{ "shape":"NonEmptyResourcePolicyType", "documentation":"

A JSON-formatted string that's constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters in the AWS CLI User Guide.

" + }, + "BlockPublicPolicy":{ + "shape":"BooleanType", + "documentation":"

Makes an optional API call to Zelkova to validate the Resource Policy to prevent broad access to your secret.

", + "box":true } } }, @@ -892,11 +977,11 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The ARN of the secret that the resource-based policy was retrieved for.

" + "documentation":"

The ARN of the secret that the resource-based policy was retrieved for.

" }, "Name":{ "shape":"NameType", - "documentation":"

The friendly name of the secret that the resource-based policy was retrieved for.

" + "documentation":"

The friendly name of the secret that the resource-based policy was retrieved for.

" } } }, @@ -906,11 +991,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. The secret must already exist.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the Lambda rotation function's processing. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

  • If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created.

  • If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored (the operation is idempotent).

  • If a version with this value already exists and that version's SecretString and SecretBinary values are different from those in the request then the request fails because you cannot modify an existing secret version. You can only create new versions to store new secret values.

This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "SecretBinary":{ @@ -978,7 +1063,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to restore from a previously scheduled deletion. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to restore from a previously scheduled deletion. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" } } }, @@ -1001,11 +1086,11 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to rotate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to rotate. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", - "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you are implementing your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.

", + "documentation":"

(Optional) Specifies a unique identifier for the new version of the secret that helps ensure idempotency.

If you use the AWS CLI or one of the AWS SDK to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes that in the request for this parameter. If you don't use the SDK and instead generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken yourself for new versions and include that value in the request.

You only need to specify your own value if you implement your own retry logic and want to ensure that a given secret is not created twice. We recommend that you generate a UUID-type value to ensure uniqueness within the specified secret.

Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during the function's processing. This value becomes the VersionId of the new version.

", "idempotencyToken":true }, "RotationLambdaARN":{ @@ -1086,7 +1171,7 @@ }, "KmsKeyId":{ "shape":"KmsKeyIdType", - "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) that's used to encrypt the SecretString and SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default KMS CMK (the one named awssecretsmanager) for this account.

" + "documentation":"

The ARN or alias of the AWS KMS customer master key (CMK) used to encrypt the SecretString and SecretBinary fields in each version of the secret. If you don't provide a key, then Secrets Manager defaults to encrypting the secret fields with the default KMS CMK, the key named awssecretsmanager, for this account.

" }, "RotationEnabled":{ "shape":"RotationEnabledType", @@ -1095,7 +1180,7 @@ }, "RotationLambdaARN":{ "shape":"RotationLambdaARNType", - "documentation":"

The ARN of an AWS Lambda function that's invoked by Secrets Manager to rotate and expire the secret either automatically per the schedule or manually by a call to RotateSecret.

" + "documentation":"

The ARN of an AWS Lambda function invoked by Secrets Manager to rotate and expire the secret either automatically per the schedule or manually by a call to RotateSecret.

" }, "RotationRules":{ "shape":"RotationRulesType", @@ -1118,19 +1203,24 @@ }, "DeletedDate":{ "shape":"DeletedDateType", - "documentation":"

The date and time on which this secret was deleted. Not present on active secrets. The secret can be recovered until the number of days in the recovery window has passed, as specified in the RecoveryWindowInDays parameter of the DeleteSecret operation.

" + "documentation":"

The date and time the deletion of the secret occurred. Not present on active secrets. The secret can be recovered until the number of days in the recovery window has passed, as specified in the RecoveryWindowInDays parameter of the DeleteSecret operation.

" }, "Tags":{ "shape":"TagListType", - "documentation":"

The list of user-defined tags that are associated with the secret. To add tags to a secret, use TagResource. To remove tags, use UntagResource.

" + "documentation":"

The list of user-defined tags associated with the secret. To add tags to a secret, use TagResource. To remove tags, use UntagResource.

" }, "SecretVersionsToStages":{ "shape":"SecretVersionsToStagesMapType", - "documentation":"

A list of all of the currently assigned SecretVersionStage staging labels and the SecretVersionId that each is attached to. Staging labels are used to keep track of the different versions during the rotation process.

A version that does not have any SecretVersionStage is considered deprecated and subject to deletion. Such versions are not included in this list.

" + "documentation":"

A list of all of the currently assigned SecretVersionStage staging labels and the SecretVersionId attached to each one. Staging labels are used to keep track of the different versions during the rotation process.

A version that does not have any SecretVersionStage is considered deprecated and subject to deletion. Such versions are not included in this list.

" }, "OwningService":{ "shape":"OwningServiceType", "documentation":"

Returns the name of the service that created the secret.

" + }, + "CreatedDate":{ + "shape":"TimestampType", + "documentation":"

The date and time when a secret was created.

", + "box":true } }, "documentation":"

A structure that contains the details about a secret. It does not include the encrypted SecretString and SecretBinary values. To get those values, use the GetSecretValue operation.

" @@ -1199,6 +1289,13 @@ "key":{"shape":"SecretVersionIdType"}, "value":{"shape":"SecretVersionStagesType"} }, + "SortOrderType":{ + "type":"string", + "enum":[ + "asc", + "desc" + ] + }, "Tag":{ "type":"structure", "members":{ @@ -1208,7 +1305,7 @@ }, "Value":{ "shape":"TagValueType", - "documentation":"

The string value that's associated with the key of the tag.

" + "documentation":"

The string value associated with the key of the tag.

" } }, "documentation":"

A structure that contains information about a tag.

" @@ -1235,7 +1332,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "Tags":{ "shape":"TagListType", @@ -1248,6 +1345,7 @@ "max":256, "min":0 }, + "TimestampType":{"type":"timestamp"}, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -1257,7 +1355,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

The identifier for the secret that you want to remove tags from. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

The identifier for the secret that you want to remove tags from. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "TagKeys":{ "shape":"TagKeyListType", @@ -1271,7 +1369,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret that you want to modify or to which you want to add a new version. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenType", @@ -1322,7 +1420,7 @@ "members":{ "SecretId":{ "shape":"SecretIdType", - "documentation":"

Specifies the secret with the version whose list of staging labels you want to modify. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names that end with a hyphen followed by six characters.

" + "documentation":"

Specifies the secret with the version whose list of staging labels you want to modify. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" }, "VersionStage":{ "shape":"SecretVersionStageType", @@ -1335,7 +1433,7 @@ }, "MoveToVersionId":{ "shape":"SecretVersionIdType", - "documentation":"

(Optional) The secret version ID that you want to add the staging label to. If you want to remove a label from a version, then do not specify this parameter.

If the staging label is already attached to a different version of the secret, then you must also specify the RemoveFromVersionId parameter.

", + "documentation":"

(Optional) The secret version ID that you want to add the staging label to. If you want to remove a label from a version, then do not specify this parameter.

If the staging label is already attached to a different version of the secret, then you must also specify the RemoveFromVersionId parameter.

", "box":true } } @@ -1345,14 +1443,59 @@ "members":{ "ARN":{ "shape":"SecretARNType", - "documentation":"

The ARN of the secret with the staging label that was modified.

" + "documentation":"

The ARN of the secret with the modified staging label.

" }, "Name":{ "shape":"SecretNameType", - "documentation":"

The friendly name of the secret with the staging label that was modified.

" + "documentation":"

The friendly name of the secret with the modified staging label.

" } } + }, + "ValidateResourcePolicyRequest":{ + "type":"structure", + "required":["ResourcePolicy"], + "members":{ + "SecretId":{ + "shape":"SecretIdType", + "documentation":"

The identifier for the secret that you want to validate a resource policy for. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.

If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too—for example, if you don’t include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you’re specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don’t create secret names ending with a hyphen followed by six characters.

If you specify an incomplete ARN without the random suffix, and instead provide the 'friendly name', you must not include the random suffix. If you do include the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException or an AccessDeniedException error, depending on your permissions.

" + }, + "ResourcePolicy":{ + "shape":"NonEmptyResourcePolicyType", + "documentation":"

Identifies the Resource Policy attached to the secret.

" + } + } + }, + "ValidateResourcePolicyResponse":{ + "type":"structure", + "members":{ + "PolicyValidationPassed":{ + "shape":"BooleanType", + "documentation":"

Returns a message stating that your Resource Policy passed validation.

" + }, + "ValidationErrors":{ + "shape":"ValidationErrorsType", + "documentation":"

Returns an error message if your policy doesn't pass validation.

" + } + } + }, + "ValidationErrorsEntry":{ + "type":"structure", + "members":{ + "CheckName":{ + "shape":"NameType", + "documentation":"

Checks the name of the policy.

" + }, + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Displays error messages if validation encounters problems during validation of the resource policy.

" + } + }, + "documentation":"

Displays errors that occurred during validation of the resource policy.

" + }, + "ValidationErrorsType":{ + "type":"list", + "member":{"shape":"ValidationErrorsEntry"} } }, - "documentation":"AWS Secrets Manager API Reference

AWS Secrets Manager is a web service that enables you to store, manage, and retrieve, secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API directly, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms (such as Java, Ruby, .NET, iOS, and Android). The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs take care of cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide.

Secrets Manager supports GET and POST requests for all actions. That is, the API doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for AWS Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help.

How examples are presented

The JSON that AWS Secrets Manager expects as your request parameters and that the service returns as a response to HTTP query requests are single, long strings without line breaks or white space formatting. The JSON shown in the examples is formatted with both line breaks and white space to improve readability. When example input parameters would also result in long strings that extend beyond the screen, we insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine which requests were successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about AWS Secrets Manager and its support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide.

" + "documentation":"AWS Secrets Manager API Reference

AWS Secrets Manager provides a service to enable you to store, manage, and retrieve secrets.

This guide provides descriptions of the Secrets Manager API. For more information about using this service, see the AWS Secrets Manager User Guide.

API Version

This version of the Secrets Manager API Reference documents the Secrets Manager API version 2017-10-17.

As an alternative to using the API, you can use one of the AWS SDKs, which consist of libraries and sample code for various programming languages and platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a convenient way to create programmatic access to AWS Secrets Manager. For example, the SDKs provide cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the AWS SDKs, including downloading and installing them, see Tools for Amazon Web Services.

We recommend you use the AWS SDKs to make programmatic API calls to Secrets Manager. However, you also can use the Secrets Manager HTTP Query API to make direct calls to the Secrets Manager web service. To learn more about the Secrets Manager HTTP Query API, see Making Query Requests in the AWS Secrets Manager User Guide.

Secrets Manager API supports GET and POST requests for all actions, and doesn't require you to use GET for some actions and POST for others. However, GET requests are subject to the limitation size of a URL. Therefore, for operations that require larger sizes, use a POST request.

Support and Feedback for AWS Secrets Manager

We welcome your feedback. Send your comments to awssecretsmanager-feedback@amazon.com, or post your feedback and questions in the AWS Secrets Manager Discussion Forum. For more information about the AWS Discussion Forums, see Forums Help.

How examples are presented

The JSON that AWS Secrets Manager expects as your request parameters and the service returns as a response to HTTP query requests contain single, long strings without line breaks or white space formatting. The JSON shown in the examples displays the code formatted with both line breaks and white space to improve readability. When example input parameters can also cause long strings extending beyond the screen, you can insert line breaks to enhance readability. You should always submit the input as a single JSON text string.

Logging API Requests

AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information that's collected by AWS CloudTrail, you can determine the requests successfully made to Secrets Manager, who made the request, when it was made, and so on. For more about AWS Secrets Manager and support for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including enabling it and finding your log files, see the AWS CloudTrail User Guide.

" } diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 02f4d450b4ee..bcf52bff0752 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 8c2ee729de07..73a22d75f4f0 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -75,7 +75,23 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

" + "documentation":"

Imports security findings generated from an integrated third-party product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

  • Confidence

  • Criticality

  • Note

  • RelatedFindings

  • Severity

  • Types

  • UserDefinedFields

  • VerificationState

  • Workflow

" + }, + "BatchUpdateFindings":{ + "name":"BatchUpdateFindings", + "http":{ + "method":"PATCH", + "requestUri":"/findings/batchupdate" + }, + "input":{"shape":"BatchUpdateFindingsRequest"}, + "output":{"shape":"BatchUpdateFindingsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidInputException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidAccessException"} + ], + "documentation":"

Used by Security Hub customers to update information about their investigation into a finding. Requested by master accounts or member accounts. Master accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.

Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding.

Master accounts can use BatchUpdateFindings to update the following finding fields and objects.

  • Confidence

  • Criticality

  • Note

  • RelatedFindings

  • Severity

  • Types

  • UserDefinedFields

  • VerificationState

  • Workflow

Member accounts can only use BatchUpdateFindings to update the Note object.

" }, "CreateActionTarget":{ "name":"CreateActionTarget", @@ -126,7 +142,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

After you use CreateMembers to create member account associations in Security Hub, you must use the InviteMembers operation to invite the accounts to enable Security Hub and become member accounts in Security Hub.

If the account owner accepts the invitation, the account becomes a member account in Security Hub, and a permission policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start to be sent to both the member and master accounts.

To remove the association between the master and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" + "documentation":"

Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the master account. To successfully create a member, you must use this action from an account that already has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

After you use CreateMembers to create member account associations in Security Hub, you must use the InviteMembers operation to invite the accounts to enable Security Hub and become member accounts in Security Hub.

If the account owner accepts the invitation, the account becomes a member account in Security Hub. A permissions policy is added that permits the master account to view the findings generated in the member account. When Security Hub is enabled in the invited account, findings start to be sent to both the member and master accounts.

To remove the association between the master and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -373,7 +389,7 @@ {"shape":"ResourceConflictException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub.

When you enable a product integration, a permission policy that grants permission for the product to send findings to Security Hub is applied.

" + "documentation":"

Enables the integration of a partner product with Security Hub. Integrated products send findings to Security Hub.

When you enable a product integration, a permissions policy that grants permission for the product to send findings to Security Hub is applied.

" }, "EnableSecurityHub":{ "name":"EnableSecurityHub", @@ -390,7 +406,7 @@ {"shape":"ResourceConflictException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request.

When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub.

When you use the EnableSecurityHub operation to enable Security Hub, you also automatically enable the CIS AWS Foundations standard. You do not enable the Payment Card Industry Data Security Standard (PCI DSS) standard. To not enable the CIS AWS Foundations standard, set EnableDefaultStandards to false.

After you enable Security Hub, to enable a standard, use the BatchEnableStandards operation. To disable a standard, use the BatchDisableStandards operation.

To learn more, see Setting Up AWS Security Hub in the AWS Security Hub User Guide.

" + "documentation":"

Enables Security Hub for your account in the current Region or the Region you specify in the request.

When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub.

When you use the EnableSecurityHub operation to enable Security Hub, you also automatically enable the following standards.

  • CIS AWS Foundations

  • AWS Foundational Security Best Practices

You do not enable the Payment Card Industry Data Security Standard (PCI DSS) standard.

To not enable the automatically enabled standards, set EnableDefaultStandards to false.

After you enable Security Hub, to enable a standard, use the BatchEnableStandards operation. To disable a standard, use the BatchDisableStandards operation.

To learn more, see Setting Up AWS Security Hub in the AWS Security Hub User Guide.

" }, "GetEnabledStandards":{ "name":"GetEnabledStandards", @@ -649,7 +665,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the Note and RecordState of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding.

" + "documentation":"

UpdateFindings is deprecated. Instead of UpdateFindings, use BatchUpdateFindings.

Updates the Note and RecordState of the Security Hub-aggregated findings that the filter attributes specify. Any member account that can view the finding also sees the update to the finding.

" }, "UpdateInsight":{ "name":"UpdateInsight", @@ -790,6 +806,32 @@ "type":"list", "member":{"shape":"AvailabilityZone"} }, + "AwsAutoScalingAutoScalingGroupDetails":{ + "type":"structure", + "members":{ + "LaunchConfigurationName":{ + "shape":"NonEmptyString", + "documentation":"

The name of the launch configuration.

" + }, + "LoadBalancerNames":{ + "shape":"StringList", + "documentation":"

The list of load balancers associated with the group.

" + }, + "HealthCheckType":{ + "shape":"NonEmptyString", + "documentation":"

The service to use for the health checks.

" + }, + "HealthCheckGracePeriod":{ + "shape":"Integer", + "documentation":"

The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before it checks the health status of an EC2 instance that has come into service.

" + }, + "CreatedTime":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the auto scaling group was created.

" + } + }, + "documentation":"

Provides details about an auto scaling group.

" + }, "AwsCloudFrontDistributionDetails":{ "type":"structure", "members":{ @@ -862,7 +904,7 @@ "documentation":"

An optional element that causes CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.

" } }, - "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon MediaStore, or other server from which CloudFront gets your files.

" + "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Amazon Elemental MediaStore, or other server from which CloudFront gets your files.

" }, "AwsCloudFrontDistributionOriginItemList":{ "type":"list", @@ -925,7 +967,7 @@ }, "Type":{ "shape":"NonEmptyString", - "documentation":"

The type of build environment to use for related builds.

The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Europe (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and Europe (Frankfurt).

The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China (Beijing), and China (Ningxia).

Valid values: WINDOWS_CONTAINER | LINUX_CONTAINER | LINUX_GPU_CONTAINER | ARM_CONTAINER

" + "documentation":"

The type of build environment to use for related builds.

The environment type ARM_CONTAINER is available only in Regions US East (N. Virginia), US East (Ohio), US West (Oregon), Europe (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and Europe (Frankfurt).

The environment type LINUX_CONTAINER with compute type build.general1.2xlarge is available only in Regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

The environment type LINUX_GPU_CONTAINER is available only in Regions US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China (Beijing), and China (Ningxia).

Valid values: WINDOWS_CONTAINER | LINUX_CONTAINER | LINUX_GPU_CONTAINER | ARM_CONTAINER

" } }, "documentation":"

Information about the build environment for this build project.

" @@ -953,7 +995,7 @@ }, "Location":{ "shape":"NonEmptyString", - "documentation":"

Information about the location of the source code to be built.

Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file.

" + "documentation":"

Information about the location of the source code to be built.

Valid values include:

  • For source code settings that are specified in the source action of a pipeline in AWS CodePipeline, location should not be specified. If it is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses the settings in a pipeline's source action instead of this value.

  • For source code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the build spec file (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ).

  • For source code in an S3 input bucket, one of the following.

    • The path to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip).

    • The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/).

  • For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

  • For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the build spec file.

" }, "GitCloneDepth":{ "shape":"Integer", @@ -1173,7 +1215,7 @@ "members":{ "CidrIp":{ "shape":"NonEmptyString", - "documentation":"

The IPv4 CIDR range. You can either specify either a CIDR range or a source security group, but not both. To specify a single IPv4 address, use the /32 prefix length.

" + "documentation":"

The IPv4 CIDR range. You can specify either a CIDR range or a source security group, but not both. To specify a single IPv4 address, use the /32 prefix length.

" } }, "documentation":"

A range of IPv4 addresses.

" @@ -1187,7 +1229,7 @@ "members":{ "CidrIpv6":{ "shape":"NonEmptyString", - "documentation":"

The IPv6 CIDR range. You can either specify either a CIDR range or a source security group, but not both. To specify a single IPv6 address, use the /128 prefix length.

" + "documentation":"

The IPv6 CIDR range. You can specify either a CIDR range or a source security group, but not both. To specify a single IPv6 address, use the /128 prefix length.

" } }, "documentation":"

A range of IPv6 addresses.

" @@ -1244,6 +1286,88 @@ "type":"list", "member":{"shape":"AwsEc2SecurityGroupUserIdGroupPair"} }, + "AwsEc2VolumeAttachment":{ + "type":"structure", + "members":{ + "AttachTime":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the attachment initiated.

" + }, + "DeleteOnTermination":{ + "shape":"Boolean", + "documentation":"

Whether the EBS volume is deleted when the EC2 instance is terminated.

" + }, + "InstanceId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the EC2 instance.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

The attachment state of the volume.

" + } + }, + "documentation":"

An attachment to an AWS EC2 volume.

" + }, + "AwsEc2VolumeAttachmentList":{ + "type":"list", + "member":{"shape":"AwsEc2VolumeAttachment"} + }, + "AwsEc2VolumeDetails":{ + "type":"structure", + "members":{ + "CreateTime":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the volume was created.

" + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

Whether the volume is encrypted.

" + }, + "Size":{ + "shape":"Integer", + "documentation":"

The size of the volume, in GiBs.

" + }, + "SnapshotId":{ + "shape":"NonEmptyString", + "documentation":"

The snapshot from which the volume was created.

" + }, + "Status":{ + "shape":"NonEmptyString", + "documentation":"

The volume state.

" + }, + "KmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to protect the volume encryption key for the volume.

" + }, + "Attachments":{ + "shape":"AwsEc2VolumeAttachmentList", + "documentation":"

The volume attachments.

" + } + }, + "documentation":"

Details about an EC2 volume.

" + }, + "AwsEc2VpcDetails":{ + "type":"structure", + "members":{ + "CidrBlockAssociationSet":{ + "shape":"CidrBlockAssociationList", + "documentation":"

Information about the IPv4 CIDR blocks associated with the VPC.

" + }, + "Ipv6CidrBlockAssociationSet":{ + "shape":"Ipv6CidrBlockAssociationList", + "documentation":"

Information about the IPv6 CIDR blocks associated with the VPC.

" + }, + "DhcpOptionsId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the set of Dynamic Host Configuration Protocol (DHCP) options that are associated with the VPC. If the default options are associated with the VPC, then this is default.

" + }, + "State":{ + "shape":"NonEmptyString", + "documentation":"

The current state of the VPC.

" + } + }, + "documentation":"

Details about an EC2 VPC.

" + }, "AwsElasticsearchDomainDetails":{ "type":"structure", "members":{ @@ -1694,7 +1818,7 @@ }, "CompatibleRuntimes":{ "shape":"NonEmptyStringList", - "documentation":"

The layer's compatible runtimes. Maximum number of 5 items.

Valid values: nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 | provided

" + "documentation":"

The layer's compatible runtimes. Maximum number of five items.

Valid values: nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 | provided

" }, "CreatedDate":{ "shape":"NonEmptyString", @@ -1717,7 +1841,7 @@ }, "Status":{ "shape":"NonEmptyString", - "documentation":"

Describes the state of the association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - the IAM role ARN is associated with the DB instance and can be used to access other AWS services on your behalf.

  • PENDING - the IAM role ARN is being associated with the DB instance.

  • INVALID - the IAM role ARN is associated with the DB instance, but the DB instance is unable to assume the IAM role in order to access other AWS services on your behalf.

" + "documentation":"

Describes the state of the association between the IAM role and the DB instance. The Status property returns one of the following values:

  • ACTIVE - The IAM role ARN is associated with the DB instance and can be used to access other AWS services on your behalf.

  • PENDING - The IAM role ARN is being associated with the DB instance.

  • INVALID - The IAM role ARN is associated with the DB instance. But the DB instance is unable to assume the IAM role in order to access other AWS services on your behalf.

" } }, "documentation":"

An AWS Identity and Access Management (IAM) role associated with the DB instance.

" @@ -1895,7 +2019,7 @@ "members":{ "ApplyServerSideEncryptionByDefault":{ "shape":"AwsS3BucketServerSideEncryptionByDefault", - "documentation":"

Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption is applied.

" + "documentation":"

Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT object request doesn't specify any server-side encryption, this default encryption is applied.

" } }, "documentation":"

An encryption rule to apply to the S3 bucket.

" @@ -1932,7 +2056,7 @@ "documentation":"

The identifier of the AWS Key Management Service (AWS KMS) symmetric customer managed customer master key (CMK) that was used for the object.

" } }, - "documentation":"

Details about an AWS S3 object.

" + "documentation":"

Details about an Amazon S3 object.

" }, "AwsSecurityFinding":{ "type":"structure", @@ -1965,7 +2089,7 @@ }, "GeneratorId":{ "shape":"NonEmptyString", - "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.

" + "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.

" }, "AwsAccountId":{ "shape":"NonEmptyString", @@ -2035,6 +2159,10 @@ "shape":"Network", "documentation":"

The details of network-related information about a finding.

" }, + "NetworkPath":{ + "shape":"NetworkPathList", + "documentation":"

Provides information about a network path that is relevant to a finding. Each entry under NetworkPath represents a component of that path.

" + }, "Process":{ "shape":"ProcessDetails", "documentation":"

The details of process-related information about a finding.

" @@ -2074,6 +2202,10 @@ "Note":{ "shape":"Note", "documentation":"

A user-defined note added to a finding.

" + }, + "Vulnerabilities":{ + "shape":"VulnerabilityList", + "documentation":"

Provides a list of vulnerabilities associated with the findings.

" } }, "documentation":"

Provides consistent format for the contents of the Security Hub-aggregated findings. AwsSecurityFinding format enables you to share findings between AWS security services and third-party solutions, and security standards checks.

A finding is a potential security issue generated either by AWS services (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated third-party solutions and standards checks.

" @@ -2095,7 +2227,7 @@ }, "GeneratorId":{ "shape":"StringFilterList", - "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plug-in, etc.

" + "documentation":"

The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security-findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.

" }, "Type":{ "shape":"StringFilterList", @@ -2420,6 +2552,28 @@ }, "documentation":"

A collection of attributes that are applied to all active Security Hub-aggregated findings and that result in a subset of findings that are included in this insight.

" }, + "AwsSecurityFindingIdentifier":{ + "type":"structure", + "required":[ + "Id", + "ProductArn" + ], + "members":{ + "Id":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the finding that was specified by the finding provider.

" + }, + "ProductArn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration.

" + } + }, + "documentation":"

Identifies a finding to update using BatchUpdateFindings.

" + }, + "AwsSecurityFindingIdentifierList":{ + "type":"list", + "member":{"shape":"AwsSecurityFindingIdentifier"} + }, "AwsSecurityFindingList":{ "type":"list", "member":{"shape":"AwsSecurityFinding"} @@ -2429,7 +2583,7 @@ "members":{ "KmsMasterKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK.

" + "documentation":"

The ID of an AWS managed customer master key (CMK) for Amazon SNS or a custom CMK.

" }, "Subscription":{ "shape":"AwsSnsTopicSubscriptionList", @@ -2473,7 +2627,7 @@ }, "KmsMasterKeyId":{ "shape":"NonEmptyString", - "documentation":"

The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.

" + "documentation":"

The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom CMK.

" }, "QueueName":{ "shape":"NonEmptyString", @@ -2495,7 +2649,7 @@ }, "DefaultAction":{ "shape":"NonEmptyString", - "documentation":"

The action to perform if none of the Rules contained in the WebACL match.

" + "documentation":"

The action to perform if none of the rules contained in the WebACL match.

" }, "Rules":{ "shape":"AwsWafWebAclRuleList", @@ -2513,7 +2667,7 @@ "members":{ "Action":{ "shape":"WafAction", - "documentation":"

Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule.

" + "documentation":"

Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.

" }, "ExcludedRules":{ "shape":"WafExcludedRuleList", @@ -2525,11 +2679,11 @@ }, "Priority":{ "shape":"Integer", - "documentation":"

Specifies the order in which the Rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before Rules with a higher value. The value must be a unique integer. If you add multiple Rules to a WebACL, the values do not need to be consecutive.

" + "documentation":"

Specifies the order in which the rules in a WebACL are evaluated. Rules with a lower value for Priority are evaluated before rules with a higher value. The value must be a unique integer. If you add multiple rules to a WebACL, the values do not need to be consecutive.

" }, "RuleId":{ "shape":"NonEmptyString", - "documentation":"

The identifier for a Rule.

" + "documentation":"

The identifier for a rule.

" }, "Type":{ "shape":"NonEmptyString", @@ -2611,24 +2765,137 @@ } } }, + "BatchUpdateFindingsRequest":{ + "type":"structure", + "required":["FindingIdentifiers"], + "members":{ + "FindingIdentifiers":{ + "shape":"AwsSecurityFindingIdentifierList", + "documentation":"

The list of findings to update. BatchUpdateFindings can be used to update up to 100 findings at a time.

For each finding, the list provides the finding identifier and the ARN of the finding provider.

" + }, + "Note":{"shape":"NoteUpdate"}, + "Severity":{ + "shape":"SeverityUpdate", + "documentation":"

Used to update the finding severity.

" + }, + "VerificationState":{ + "shape":"VerificationState", + "documentation":"

Indicates the veracity of a finding.

The available values for VerificationState are as follows.

  • UNKNOWN – The default disposition of a security finding

  • TRUE_POSITIVE – The security finding is confirmed

  • FALSE_POSITIVE – The security finding was determined to be a false alarm

  • BENIGN_POSITIVE – A special case of TRUE_POSITIVE where the finding doesn't pose any threat, is expected, or both

" + }, + "Confidence":{ + "shape":"RatioScale", + "documentation":"

The updated value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.

Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.

" + }, + "Criticality":{ + "shape":"RatioScale", + "documentation":"

The updated value for the level of importance assigned to the resources associated with the findings.

A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.

" + }, + "Types":{ + "shape":"TypeList", + "documentation":"

One or more finding types in the format of namespace/category/classifier that classify a finding.

Valid namespace values are as follows.

  • Software and Configuration Checks

  • TTPs

  • Effects

  • Unusual Behaviors

  • Sensitive Data Identifications

" + }, + "UserDefinedFields":{ + "shape":"FieldMap", + "documentation":"

A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.

" + }, + "Workflow":{ + "shape":"WorkflowUpdate", + "documentation":"

Used to update the workflow status of a finding.

The workflow status indicates the progress of the investigation into the finding.

" + }, + "RelatedFindings":{ + "shape":"RelatedFindingList", + "documentation":"

A list of findings that are related to the updated findings.

" + } + } + }, + "BatchUpdateFindingsResponse":{ + "type":"structure", + "required":[ + "ProcessedFindings", + "UnprocessedFindings" + ], + "members":{ + "ProcessedFindings":{ + "shape":"AwsSecurityFindingIdentifierList", + "documentation":"

The list of findings that were updated successfully.

" + }, + "UnprocessedFindings":{ + "shape":"BatchUpdateFindingsUnprocessedFindingsList", + "documentation":"

The list of findings that were not updated.

" + } + } + }, + "BatchUpdateFindingsUnprocessedFinding":{ + "type":"structure", + "required":[ + "FindingIdentifier", + "ErrorCode", + "ErrorMessage" + ], + "members":{ + "FindingIdentifier":{ + "shape":"AwsSecurityFindingIdentifier", + "documentation":"

The identifier of the finding that was not updated.

" + }, + "ErrorCode":{ + "shape":"NonEmptyString", + "documentation":"

The code associated with the error.

" + }, + "ErrorMessage":{ + "shape":"NonEmptyString", + "documentation":"

The message associated with the error.

" + } + }, + "documentation":"

A finding from a BatchUpdateFindings request that Security Hub was unable to update.

" + }, + "BatchUpdateFindingsUnprocessedFindingsList":{ + "type":"list", + "member":{"shape":"BatchUpdateFindingsUnprocessedFinding"} + }, "Boolean":{"type":"boolean"}, "CategoryList":{ "type":"list", "member":{"shape":"NonEmptyString"} }, + "CidrBlockAssociation":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"NonEmptyString", + "documentation":"

The association ID for the IPv4 CIDR block.

" + }, + "CidrBlock":{ + "shape":"NonEmptyString", + "documentation":"

The IPv4 CIDR block.

" + }, + "CidrBlockState":{ + "shape":"NonEmptyString", + "documentation":"

Information about the state of the IPv4 CIDR block.

" + } + }, + "documentation":"

An IPv4 CIDR block association.

" + }, + "CidrBlockAssociationList":{ + "type":"list", + "member":{"shape":"CidrBlockAssociation"} + }, "Compliance":{ "type":"structure", "members":{ "Status":{ "shape":"ComplianceStatus", - "documentation":"

The result of a standards check.

" + "documentation":"

The result of a standards check.

The valid values for Status are as follows.

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported for your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the AWS Config evaluation was NOT_APPLICABLE. If the AWS Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" }, "RelatedRequirements":{ "shape":"RelatedRequirementsList", - "documentation":"

List of requirements that are related to a standards control.

" + "documentation":"

For a control, the industry or regulatory framework requirements that are related to the control. The check for that control is aligned with these requirements.

" + }, + "StatusReasons":{ + "shape":"StatusReasonsList", + "documentation":"

For findings generated from controls, a list of reasons behind the value of Status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the AWS Security Hub User Guide.

" } }, - "documentation":"

Exclusive to findings that are generated as the result of a check run against a specific rule in a supported security standard, such as CIS AWS Foundations. Contains security standard-related finding details.

Values include the following:

  • Allowed values are the following:

    • PASSED - Standards check passed for all evaluated resources.

    • WARNING - Some information is missing or this check is not supported given your configuration.

    • FAILED - Standards check failed for at least one evaluated resource.

    • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the AWS Config evaluation was NOT_APPLICABLE. If the AWS Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.

" + "documentation":"

Contains finding details that are specific to control-based findings. Only returned for findings generated from controls.

" }, "ComplianceStatus":{ "type":"string", @@ -2750,6 +3017,28 @@ } } }, + "Cvss":{ + "type":"structure", + "members":{ + "Version":{ + "shape":"NonEmptyString", + "documentation":"

The version of CVSS for the CVSS score.

" + }, + "BaseScore":{ + "shape":"Double", + "documentation":"

The base CVSS score.

" + }, + "BaseVector":{ + "shape":"NonEmptyString", + "documentation":"

The base scoring vector for the CVSS score.

" + } + }, + "documentation":"

CVSS scores from the advisory related to the vulnerability.

" + }, + "CvssList":{ + "type":"list", + "member":{"shape":"Cvss"} + }, "DateFilter":{ "type":"structure", "members":{ @@ -3119,7 +3408,7 @@ "members":{ "Tags":{ "shape":"TagMap", - "documentation":"

The tags to add to the Hub resource when you enable Security Hub.

" + "documentation":"

The tags to add to the hub resource when you enable Security Hub.

" }, "EnableDefaultStandards":{ "shape":"Boolean", @@ -3510,6 +3799,28 @@ "type":"list", "member":{"shape":"IpFilter"} }, + "Ipv6CidrBlockAssociation":{ + "type":"structure", + "members":{ + "AssociationId":{ + "shape":"NonEmptyString", + "documentation":"

The association ID for the IPv6 CIDR block.

" + }, + "Ipv6CidrBlock":{ + "shape":"NonEmptyString", + "documentation":"

The IPv6 CIDR block.

" + }, + "CidrBlockState":{ + "shape":"NonEmptyString", + "documentation":"

Information about the state of the CIDR block.

" + } + }, + "documentation":"

An IPv6 CIDR block association.

" + }, + "Ipv6CidrBlockAssociationList":{ + "type":"list", + "member":{"shape":"Ipv6CidrBlockAssociation"} + }, "KeywordFilter":{ "type":"structure", "members":{ @@ -3796,6 +4107,10 @@ "shape":"NonEmptyString", "documentation":"

The protocol of network-related information about a finding.

" }, + "OpenPortRange":{ + "shape":"PortRange", + "documentation":"

The range of open ports that is present on the network.

" + }, "SourceIpV4":{ "shape":"NonEmptyString", "documentation":"

The source IPv4 address of network-related information about a finding.

" @@ -3842,6 +4157,64 @@ "OUT" ] }, + "NetworkHeader":{ + "type":"structure", + "members":{ + "Protocol":{ + "shape":"NonEmptyString", + "documentation":"

The protocol used for the component.

" + }, + "Destination":{ + "shape":"NetworkPathComponentDetails", + "documentation":"

Information about the destination of the component.

" + }, + "Source":{ + "shape":"NetworkPathComponentDetails", + "documentation":"

Information about the origin of the component.

" + } + }, + "documentation":"

Details about a network path component that occurs before or after the current component.

" + }, + "NetworkPathComponent":{ + "type":"structure", + "members":{ + "ComponentId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of a component in the network path.

" + }, + "ComponentType":{ + "shape":"NonEmptyString", + "documentation":"

The type of component.

" + }, + "Egress":{ + "shape":"NetworkHeader", + "documentation":"

Information about the component that comes after the current component in the network path.

" + }, + "Ingress":{ + "shape":"NetworkHeader", + "documentation":"

Information about the component that comes before the current node in the network path.

" + } + }, + "documentation":"

Information about a network path component.

" + }, + "NetworkPathComponentDetails":{ + "type":"structure", + "members":{ + "Address":{ + "shape":"StringList", + "documentation":"

The IP addresses of the destination.

" + }, + "PortRanges":{ + "shape":"PortRangeList", + "documentation":"

A list of port ranges for the destination.

" + } + }, + "documentation":"

Information about the destination of the next component in the network path.

" + }, + "NetworkPathList":{ + "type":"list", + "member":{"shape":"NetworkPathComponent"} + }, "NextToken":{"type":"string"}, "NonEmptyString":{ "type":"string", @@ -3922,6 +4295,24 @@ "aws-us-gov" ] }, + "PortRange":{ + "type":"structure", + "members":{ + "Begin":{ + "shape":"Integer", + "documentation":"

The first port in the port range.

" + }, + "End":{ + "shape":"Integer", + "documentation":"

The last port in the port range.

" + } + }, + "documentation":"

A range of ports.

" + }, + "PortRangeList":{ + "type":"list", + "member":{"shape":"PortRange"} + }, "ProcessDetails":{ "type":"structure", "members":{ @@ -4003,6 +4394,11 @@ "type":"list", "member":{"shape":"Product"} }, + "RatioScale":{ + "type":"integer", + "max":100, + "min":0 + }, "Recommendation":{ "type":"structure", "members":{ @@ -4111,6 +4507,10 @@ "ResourceDetails":{ "type":"structure", "members":{ + "AwsAutoScalingAutoScalingGroup":{ + "shape":"AwsAutoScalingAutoScalingGroupDetails", + "documentation":"

Details for an autoscaling group.

" + }, "AwsCodeBuildProject":{ "shape":"AwsCodeBuildProjectDetails", "documentation":"

Details for an AWS CodeBuild project.

" @@ -4125,12 +4525,20 @@ }, "AwsEc2NetworkInterface":{ "shape":"AwsEc2NetworkInterfaceDetails", - "documentation":"

Details for an AWS EC2 network interface.

" + "documentation":"

Details for an Amazon EC2 network interface.

" }, "AwsEc2SecurityGroup":{ "shape":"AwsEc2SecurityGroupDetails", "documentation":"

Details for an EC2 security group.

" }, + "AwsEc2Volume":{ + "shape":"AwsEc2VolumeDetails", + "documentation":"

Details for an EC2 volume.

" + }, + "AwsEc2Vpc":{ + "shape":"AwsEc2VpcDetails", + "documentation":"

Details for an EC2 VPC.

" + }, "AwsElbv2LoadBalancer":{ "shape":"AwsElbv2LoadBalancerDetails", "documentation":"

Details about a load balancer.

" @@ -4141,7 +4549,7 @@ }, "AwsS3Bucket":{ "shape":"AwsS3BucketDetails", - "documentation":"

Details about an Amazon S3 Bucket related to a finding.

" + "documentation":"

Details about an Amazon S3 bucket related to a finding.

" }, "AwsS3Object":{ "shape":"AwsS3ObjectDetails", @@ -4169,7 +4577,7 @@ }, "AwsRdsDbInstance":{ "shape":"AwsRdsDbInstanceDetails", - "documentation":"

Details for an RDS database instance.

" + "documentation":"

Details for an Amazon RDS database instance.

" }, "AwsSnsTopic":{ "shape":"AwsSnsTopicDetails", @@ -4235,7 +4643,7 @@ "members":{ "Product":{ "shape":"Double", - "documentation":"

The native severity as defined by the AWS service or integrated partner product that generated the finding.

" + "documentation":"

Deprecated. This attribute is being deprecated. Instead of providing Product, provide Original.

The native severity as defined by the AWS service or integrated partner product that generated the finding.

" }, "Label":{ "shape":"SeverityLabel", @@ -4244,6 +4652,10 @@ "Normalized":{ "shape":"Integer", "documentation":"

Deprecated. This attribute is being deprecated. Instead of providing Normalized, provide Label.

If you provide Normalized and do not provide Label, Label is set automatically as follows.

  • 0 - INFORMATIONAL

  • 1–39 - LOW

  • 40–69 - MEDIUM

  • 70–89 - HIGH

  • 90–100 - CRITICAL

" + }, + "Original":{ + "shape":"NonEmptyString", + "documentation":"

The native severity from the finding product that generated the finding.

" } }, "documentation":"

The severity of the finding.

" @@ -4267,6 +4679,54 @@ "CRITICAL" ] }, + "SeverityUpdate":{ + "type":"structure", + "members":{ + "Normalized":{ + "shape":"RatioScale", + "documentation":"

The normalized severity for the finding. This attribute is to be deprecated in favor of Label.

If you provide Normalized and do not provide Label, Label is set automatically as follows.

  • 0 - INFORMATIONAL

  • 1–39 - LOW

  • 40–69 - MEDIUM

  • 70–89 - HIGH

  • 90–100 - CRITICAL

" + }, + "Product":{ + "shape":"Double", + "documentation":"

The native severity as defined by the AWS service or integrated partner product that generated the finding.

" + }, + "Label":{ + "shape":"SeverityLabel", + "documentation":"

The severity value of the finding. The allowed values are the following.

  • INFORMATIONAL - No issue was found.

  • LOW - The issue does not require action on its own.

  • MEDIUM - The issue must be addressed but not urgently.

  • HIGH - The issue must be addressed as a priority.

  • CRITICAL - The issue must be remediated immediately to avoid it escalating.

" + } + }, + "documentation":"

Updates to the severity information for a finding.

" + }, + "SoftwarePackage":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the software package.

" + }, + "Version":{ + "shape":"NonEmptyString", + "documentation":"

The version of the software package.

" + }, + "Epoch":{ + "shape":"NonEmptyString", + "documentation":"

The epoch of the software package.

" + }, + "Release":{ + "shape":"NonEmptyString", + "documentation":"

The release of the software package.

" + }, + "Architecture":{ + "shape":"NonEmptyString", + "documentation":"

The architecture used for the software package.

" + } + }, + "documentation":"

Information about a software package.

" + }, + "SoftwarePackageList":{ + "type":"list", + "member":{"shape":"SoftwarePackage"} + }, "SortCriteria":{ "type":"list", "member":{"shape":"SortCriterion"} @@ -4442,6 +4902,25 @@ "type":"list", "member":{"shape":"StandardsSubscription"} }, + "StatusReason":{ + "type":"structure", + "required":["ReasonCode"], + "members":{ + "ReasonCode":{ + "shape":"NonEmptyString", + "documentation":"

A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the AWS Security Hub User Guide.

" + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

The corresponding description for the status reason code.

" + } + }, + "documentation":"

Provides additional context for the value of Compliance.Status.

" + }, + "StatusReasonsList":{ + "type":"list", + "member":{"shape":"StatusReason"} + }, "StringFilter":{ "type":"structure", "members":{ @@ -4706,7 +5185,7 @@ }, "DisabledReason":{ "shape":"NonEmptyString", - "documentation":"

A description of the reason why you are disabling a security standard control.

" + "documentation":"

A description of the reason why you are disabling a security standard control. If you are disabling a control, then this is required.

" } } }, @@ -4724,15 +5203,77 @@ "BENIGN_POSITIVE" ] }, + "Vulnerability":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the vulnerability.

" + }, + "VulnerablePackages":{ + "shape":"SoftwarePackageList", + "documentation":"

List of software packages that have the vulnerability.

" + }, + "Cvss":{ + "shape":"CvssList", + "documentation":"

CVSS scores from the advisory related to the vulnerability.

" + }, + "RelatedVulnerabilities":{ + "shape":"StringList", + "documentation":"

List of vulnerabilities that are related to this vulnerability.

" + }, + "Vendor":{ + "shape":"VulnerabilityVendor", + "documentation":"

Information about the vendor that generates the vulnerability report.

" + }, + "ReferenceUrls":{ + "shape":"StringList", + "documentation":"

A list of URLs that provide additional information about the vulnerability.

" + } + }, + "documentation":"

A vulnerability associated with a finding.

" + }, + "VulnerabilityList":{ + "type":"list", + "member":{"shape":"Vulnerability"} + }, + "VulnerabilityVendor":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

The name of the vendor.

" + }, + "Url":{ + "shape":"NonEmptyString", + "documentation":"

The URL of the vulnerability advisory.

" + }, + "VendorSeverity":{ + "shape":"NonEmptyString", + "documentation":"

The severity that the vendor assigned to the vulnerability.

" + }, + "VendorCreatedAt":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the vulnerability advisory was created.

" + }, + "VendorUpdatedAt":{ + "shape":"NonEmptyString", + "documentation":"

The datetime when the vulnerability advisory was last updated.

" + } + }, + "documentation":"

A vendor that generates a vulnerability report.

" + }, "WafAction":{ "type":"structure", "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a Rule.

Valid settings include the following:

  • ALLOW - AWS WAF allows requests

  • BLOCK - AWS WAF blocks requests

  • COUNT - AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" + "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a rule.

Valid settings include the following:

  • ALLOW - AWS WAF allows requests

  • BLOCK - AWS WAF blocks requests

  • COUNT - AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" } }, - "documentation":"

Details about the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule.

" + "documentation":"

Details about the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.

" }, "WafExcludedRule":{ "type":"structure", @@ -4753,7 +5294,7 @@ "members":{ "Type":{ "shape":"NonEmptyString", - "documentation":"

COUNT overrides the action specified by the individual rule within a RuleGroup .

If set to NONE, the rule's action takes place.

" + "documentation":"

COUNT overrides the action specified by the individual rule within a RuleGroup.

If set to NONE, the rule's action takes place.

" } }, "documentation":"

Details about an override action for a rule.

" @@ -4788,6 +5329,16 @@ "RESOLVED", "SUPPRESSED" ] + }, + "WorkflowUpdate":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"WorkflowStatus", + "documentation":"

The status of the investigation into the finding. The allowed values are the following.

  • NEW - The initial state of a finding, before it is reviewed.

  • NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.

  • RESOLVED - The finding was reviewed and remediated and is now considered resolved.

  • SUPPRESSED - The finding will not be reviewed again and will not be acted upon.

" + } + }, + "documentation":"

Used to update information about the investigation into the finding.

" } }, "documentation":"

Security Hub provides you with a comprehensive view of the security state of your AWS environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from AWS accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the AWS Security Hub User Guide .

When you use operations in the Security Hub API, the requests are executed only in the AWS Region that is currently active or in the specific AWS Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the master account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

  • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

  • UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

  • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

" diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index e9f5cc76b5d7..72308b2b9a6b 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 1cab942fcfa6..23871a75ccfd 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json index 47c01ef045d3..06ca301a6f9c 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json @@ -71,7 +71,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Associates the specified product with the specified portfolio.

" + "documentation":"

Associates the specified product with the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "AssociateServiceActionWithProvisioningArtifact":{ "name":"AssociateServiceActionWithProvisioningArtifact", @@ -160,7 +160,7 @@ {"shape":"LimitExceededException"}, {"shape":"DuplicateResourceException"} ], - "documentation":"

Creates a constraint.

" + "documentation":"

Creates a constraint.

A delegated admin is authorized to invoke this command.

" }, "CreatePortfolio":{ "name":"CreatePortfolio", @@ -175,7 +175,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Creates a portfolio.

" + "documentation":"

Creates a portfolio.

A delegated admin is authorized to invoke this command.

" }, "CreatePortfolioShare":{ "name":"CreatePortfolioShare", @@ -192,7 +192,7 @@ {"shape":"OperationNotSupportedException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Shares the specified portfolio with the specified account or organization node. Shares to an organization node can only be created by the master account of an Organization. AWSOrganizationsAccess must be enabled in order to create a portfolio share to an organization node.

" + "documentation":"

Shares the specified portfolio with the specified account or organization node. Shares to an organization node can only be created by the master account of an organization or by a delegated administrator. You can share portfolios to an organization, an organizational unit, or a specific account.

Note that if a delegated admin is de-registered, they can no longer create portfolio shares.

AWSOrganizationsAccess must be enabled in order to create a portfolio share to an organization node.

" }, "CreateProduct":{ "name":"CreateProduct", @@ -207,7 +207,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Creates a product.

" + "documentation":"

Creates a product.

A delegated admin is authorized to invoke this command.

" }, "CreateProvisionedProductPlan":{ "name":"CreateProvisionedProductPlan", @@ -280,7 +280,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Deletes the specified constraint.

" + "documentation":"

Deletes the specified constraint.

A delegated admin is authorized to invoke this command.

" }, "DeletePortfolio":{ "name":"DeletePortfolio", @@ -296,7 +296,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Deletes the specified portfolio.

You cannot delete a portfolio if it was shared with you or if it has associated products, users, constraints, or shared accounts.

" + "documentation":"

Deletes the specified portfolio.

You cannot delete a portfolio if it was shared with you or if it has associated products, users, constraints, or shared accounts.

A delegated admin is authorized to invoke this command.

" }, "DeletePortfolioShare":{ "name":"DeletePortfolioShare", @@ -312,7 +312,7 @@ {"shape":"OperationNotSupportedException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Stops sharing the specified portfolio with the specified account or organization node. Shares to an organization node can only be deleted by the master account of an Organization.

" + "documentation":"

Stops sharing the specified portfolio with the specified account or organization node. Shares to an organization node can only be deleted by the master account of an organization or by a delegated administrator.

Note that if a delegated admin is de-registered, portfolio shares created from that account are removed.

" }, "DeleteProduct":{ "name":"DeleteProduct", @@ -328,7 +328,7 @@ {"shape":"InvalidParametersException"}, {"shape":"TagOptionNotMigratedException"} ], - "documentation":"

Deletes the specified product.

You cannot delete a product if it was shared with you or is associated with a portfolio.

" + "documentation":"

Deletes the specified product.

You cannot delete a product if it was shared with you or is associated with a portfolio.

A delegated admin is authorized to invoke this command.

" }, "DeleteProvisionedProductPlan":{ "name":"DeleteProvisionedProductPlan", @@ -425,7 +425,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets information about the specified portfolio.

" + "documentation":"

Gets information about the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "DescribePortfolioShareStatus":{ "name":"DescribePortfolioShareStatus", @@ -440,7 +440,7 @@ {"shape":"InvalidParametersException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Gets the status of the specified portfolio share operation. This API can only be called by the master account in the organization.

" + "documentation":"

Gets the status of the specified portfolio share operation. This API can only be called by the master account in the organization or by a delegated admin.

" }, "DescribeProduct":{ "name":"DescribeProduct", @@ -465,7 +465,8 @@ "input":{"shape":"DescribeProductAsAdminInput"}, "output":{"shape":"DescribeProductAsAdminOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} ], "documentation":"

Gets information about the specified product. This operation is run with administrator access.

" }, @@ -519,7 +520,8 @@ "input":{"shape":"DescribeProvisioningArtifactInput"}, "output":{"shape":"DescribeProvisioningArtifactOutput"}, "errors":[ - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParametersException"} ], "documentation":"

Gets information about the specified provisioning artifact (also known as a version) for the specified product.

" }, @@ -604,7 +606,7 @@ {"shape":"InvalidStateException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Disable portfolio sharing through AWS Organizations feature. This feature will not delete your current shares but it will prevent you from creating new shares throughout your organization. Current shares will not be in sync with your organization structure if it changes after calling this API. This API can only be called by the master account in the organization.

" + "documentation":"

Disable the portfolio sharing feature through AWS Organizations. This feature will not delete your current shares but it will prevent you from creating new shares throughout your organization. Current shares will not be in sync with your organization structure if it changes after calling this API. This API can only be called by the master account in the organization.

This API can't be invoked if there are active delegated administrators in the organization.

Note that a delegated administrator is not authorized to invoke DisableAWSOrganizationsAccess.

" }, "DisassociateBudgetFromResource":{ "name":"DisassociateBudgetFromResource", @@ -646,7 +648,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Disassociates the specified product from the specified portfolio.

" + "documentation":"

Disassociates the specified product from the specified portfolio.

A delegated admin is authorized to invoke this command.

" }, "DisassociateServiceActionFromProvisioningArtifact":{ "name":"DisassociateServiceActionFromProvisioningArtifact", @@ -688,7 +690,7 @@ {"shape":"InvalidStateException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Enable portfolio sharing feature through AWS Organizations. This API will allow Service Catalog to receive updates on your organization in order to sync your shares with the current structure. This API can only be called by the master account in the organization.

By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess on your behalf so that your shares can be in sync with any changes in your AWS Organizations structure.

" + "documentation":"

Enable the portfolio sharing feature through AWS Organizations. This API will allow Service Catalog to receive updates on your organization in order to sync your shares with the current structure. This API can only be called by the master account in the organization.

By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess on your behalf so that your shares can be in sync with any changes in your AWS Organizations structure.

Note that a delegated administrator is not authorized to invoke EnableAWSOrganizationsAccess.

" }, "ExecuteProvisionedProductPlan":{ "name":"ExecuteProvisionedProductPlan", @@ -732,7 +734,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Get the Access Status for AWS Organization portfolio share feature. This API can only be called by the master account in the organization.

" + "documentation":"

Get the access status for the AWS Organizations portfolio share feature. This API can only be called by the master account in the organization or by a delegated admin.

" }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", @@ -803,7 +805,7 @@ {"shape":"InvalidParametersException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Lists the organization nodes that have access to the specified portfolio. This API can only be called by the master account in the organization.

" + "documentation":"

Lists the organization nodes that have access to the specified portfolio. This API can only be called by the master account in the organization or by a delegated admin.

If a delegated admin is de-registered, they can no longer perform this operation.

" }, "ListPortfolioAccess":{ "name":"ListPortfolioAccess", @@ -817,7 +819,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParametersException"} ], - "documentation":"

Lists the account IDs that have access to the specified portfolio.

" + "documentation":"

Lists the account IDs that have access to the specified portfolio.

A delegated admin can list the accounts that have access to the shared portfolio. Note that if a delegated admin is de-registered, they can no longer perform this operation.

" }, "ListPortfolios":{ "name":"ListPortfolios", @@ -1988,7 +1990,7 @@ }, "Definition":{ "shape":"ServiceActionDefinitionMap", - "documentation":"

The self-service action definition. Can be one of the following:

Name

The name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance.

Version

The AWS Systems Manager automation document version. For example, \"Version\": \"1\"

AssumeRole

The Amazon Resource Name (ARN) of the role that performs the self-service actions on your behalf. For example, \"AssumeRole\": \"arn:aws:iam::12345678910:role/ActionRole\".

To reuse the provisioned product launch role, set to \"AssumeRole\": \"LAUNCH_ROLE\".

Parameters

The list of parameters in JSON format.

For example: [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TARGET\\\"}] or [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TEXT_VALUE\\\"}].

" + "documentation":"

The self-service action definition. Can be one of the following:

Name

The name of the AWS Systems Manager document (SSM document). For example, AWS-RestartEC2Instance.

If you are using a shared SSM document, you must provide the ARN instead of the name.

Version

The AWS Systems Manager automation document version. For example, \"Version\": \"1\"

AssumeRole

The Amazon Resource Name (ARN) of the role that performs the self-service actions on your behalf. For example, \"AssumeRole\": \"arn:aws:iam::12345678910:role/ActionRole\".

To reuse the provisioned product launch role, set to \"AssumeRole\": \"LAUNCH_ROLE\".

Parameters

The list of parameters in JSON format.

For example: [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TARGET\\\"}] or [{\\\"Name\\\":\\\"InstanceId\\\",\\\"Type\\\":\\\"TEXT_VALUE\\\"}].

" }, "Description":{ "shape":"ServiceActionDescription", @@ -2348,7 +2350,6 @@ }, "DescribeProductAsAdminInput":{ "type":"structure", - "required":["Id"], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2357,6 +2358,10 @@ "Id":{ "shape":"Id", "documentation":"

The product identifier.

" + }, + "Name":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" } } }, @@ -2387,7 +2392,6 @@ }, "DescribeProductInput":{ "type":"structure", - "required":["Id"], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2396,6 +2400,10 @@ "Id":{ "shape":"Id", "documentation":"

The product identifier.

" + }, + "Name":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" } } }, @@ -2413,6 +2421,10 @@ "Budgets":{ "shape":"Budgets", "documentation":"

Information about the associated budgets.

" + }, + "LaunchPaths":{ + "shape":"LaunchPaths", + "documentation":"

Information about the associated launch paths.

" } } }, @@ -2511,10 +2523,6 @@ }, "DescribeProvisioningArtifactInput":{ "type":"structure", - "required":[ - "ProvisioningArtifactId", - "ProductId" - ], "members":{ "AcceptLanguage":{ "shape":"AcceptLanguage", @@ -2528,6 +2536,14 @@ "shape":"Id", "documentation":"

The product identifier.

" }, + "ProvisioningArtifactName":{ + "shape":"ProvisioningArtifactName", + "documentation":"

The provisioning artifact name.

" + }, + "ProductName":{ + "shape":"ProductViewName", + "documentation":"

The product name.

" + }, "Verbose":{ "shape":"Verbose", "documentation":"

Indicates whether a verbose level of detail is enabled.

" @@ -3075,6 +3091,20 @@ "exception":true }, "LastRequestId":{"type":"string"}, + "LaunchPath":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"Id", + "documentation":"

The identifier of the launch path.

" + }, + "Name":{ + "shape":"PortfolioName", + "documentation":"

The name of the launch path.

" + } + }, + "documentation":"

A launch path object.

" + }, "LaunchPathSummaries":{ "type":"list", "member":{"shape":"LaunchPathSummary"} @@ -3101,6 +3131,10 @@ }, "documentation":"

Summary information about a product path for a user.

" }, + "LaunchPaths":{ + "type":"list", + "member":{"shape":"LaunchPath"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -4501,7 +4535,10 @@ }, "ProvisioningArtifactActive":{"type":"boolean"}, "ProvisioningArtifactCreatedTime":{"type":"timestamp"}, - "ProvisioningArtifactDescription":{"type":"string"}, + "ProvisioningArtifactDescription":{ + "type":"string", + "max":8192 + }, "ProvisioningArtifactDetail":{ "type":"structure", "members":{ @@ -4556,7 +4593,10 @@ }, "ProvisioningArtifactInfoKey":{"type":"string"}, "ProvisioningArtifactInfoValue":{"type":"string"}, - "ProvisioningArtifactName":{"type":"string"}, + "ProvisioningArtifactName":{ + "type":"string", + "max":8192 + }, "ProvisioningArtifactParameter":{ "type":"structure", "members":{ @@ -5912,7 +5952,7 @@ }, "ProvisionedProductProperties":{ "shape":"ProvisionedProductProperties", - "documentation":"

A map that contains the provisioned product properties to be updated.

The OWNER key only accepts user ARNs. The owner is the user that is allowed to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when they were an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

" + "documentation":"

A map that contains the provisioned product properties to be updated.

The OWNER key accepts user ARNs and role ARNs. The owner is the user that is allowed to see, update, terminate, and execute service actions in the provisioned product.

The administrator can change the owner of a provisioned product to another IAM user within the same account. Both end user owners and administrators can see ownership history of the provisioned product using the ListRecordHistory API. The new owner can describe all past records for the provisioned product using the DescribeRecord API. The previous owner can no longer use DescribeRecord, but can still see the product's history from when they were an owner using ListRecordHistory.

If a provisioned product ownership is assigned to an end user, they can see and perform any action through the API or Service Catalog console such as update, terminate, and execute service actions. If an end user provisions a product and the owner is updated to someone else, they will no longer be able to see or perform any actions through API or the Service Catalog console on that provisioned product.

" }, "IdempotencyToken":{ "shape":"IdempotencyToken", diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 7049e4773180..a5e268fc09c2 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicediscovery/src/main/resources/codegen-resources/service-2.json b/services/servicediscovery/src/main/resources/codegen-resources/service-2.json index a5b5cfaba5d8..21f6d1072c61 100644 --- a/services/servicediscovery/src/main/resources/codegen-resources/service-2.json +++ b/services/servicediscovery/src/main/resources/codegen-resources/service-2.json @@ -25,9 +25,10 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], - "documentation":"

Creates an HTTP namespace. Service instances that you register using an HTTP namespace can be discovered using a DiscoverInstances request but can't be discovered using DNS.

For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + "documentation":"

Creates an HTTP namespace. Service instances that you register using an HTTP namespace can be discovered using a DiscoverInstances request but can't be discovered using DNS.

For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, "CreatePrivateDnsNamespace":{ "name":"CreatePrivateDnsNamespace", @@ -41,9 +42,10 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a private namespace based on DNS, which will be visible only inside a specified Amazon VPC. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + "documentation":"

Creates a private namespace based on DNS, which will be visible only inside a specified Amazon VPC. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, "CreatePublicDnsNamespace":{ "name":"CreatePublicDnsNamespace", @@ -57,9 +59,10 @@ {"shape":"InvalidInput"}, {"shape":"NamespaceAlreadyExists"}, {"shape":"ResourceLimitExceeded"}, - {"shape":"DuplicateRequest"} + {"shape":"DuplicateRequest"}, + {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a public namespace based on DNS, which will be visible on the internet. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + "documentation":"

Creates a public namespace based on DNS, which will be visible on the internet. The namespace defines your service naming scheme. For example, if you name your namespace example.com and name your service backend, the resulting DNS name for the service will be backend.example.com. For the current limit on the number of namespaces that you can create using the same AWS account, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, "CreateService":{ "name":"CreateService", @@ -73,9 +76,10 @@ {"shape":"InvalidInput"}, {"shape":"ResourceLimitExceeded"}, {"shape":"NamespaceNotFound"}, - {"shape":"ServiceAlreadyExists"} + {"shape":"ServiceAlreadyExists"}, + {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a service, which defines the configuration for the following entities:

  • For public and private DNS namespaces, one of the following combinations of DNS records in Amazon Route 53:

    • A

    • AAAA

    • A and AAAA

    • SRV

    • CNAME

  • Optionally, a health check

After you create the service, you can submit a RegisterInstance request, and AWS Cloud Map uses the values in the configuration to create the specified entities.

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + "documentation":"

Creates a service, which defines the configuration for the following entities:

  • For public and private DNS namespaces, one of the following combinations of DNS records in Amazon Route 53:

    • A

    • AAAA

    • A and AAAA

    • SRV

    • CNAME

  • Optionally, a health check

After you create the service, you can submit a RegisterInstance request, and AWS Cloud Map uses the values in the configuration to create the specified entities.

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" }, "DeleteNamespace":{ "name":"DeleteNamespace", @@ -136,9 +140,10 @@ "errors":[ {"shape":"ServiceNotFound"}, {"shape":"NamespaceNotFound"}, - {"shape":"InvalidInput"} + {"shape":"InvalidInput"}, + {"shape":"RequestLimitExceeded"} ], - "documentation":"

Discovers registered instances for a specified namespace and service.

", + "documentation":"

Discovers registered instances for a specified namespace and service. You can use DiscoverInstances to discover instances for any type of namespace. For public and private DNS namespaces, you can also use DNS queries to discover instances.

", "endpoint":{"hostPrefix":"data-"} }, "GetInstance":{ @@ -197,7 +202,7 @@ {"shape":"InvalidInput"}, {"shape":"OperationNotFound"} ], - "documentation":"

Gets information about any operation that returns an operation ID in the response, such as a CreateService request.

To get a list of operations that match specified criteria, see ListOperations.

" + "documentation":"

Gets information about any operation that returns an operation ID in the response, such as a CreateService request.

To get a list of operations that match specified criteria, see ListOperations.

" }, "GetService":{ "name":"GetService", @@ -266,6 +271,20 @@ ], "documentation":"

Lists summary information for all the services that are associated with one or more specified namespaces.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Lists tags for the specified resource.

" + }, "RegisterInstance":{ "name":"RegisterInstance", "http":{ @@ -281,7 +300,36 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Creates or updates one or more records and, optionally, creates a health check based on the settings in a specified service. When you submit a RegisterInstance request, the following occurs:

  • For each DNS record that you define in the service that is specified by ServiceId, a record is created or updated in the hosted zone that is associated with the corresponding namespace.

  • If the service includes HealthCheckConfig, a health check is created based on the settings in the health check configuration.

  • The health check, if any, is associated with each of the new or updated records.

One RegisterInstance request must complete before you can submit another request and specify the same service ID and instance ID.

For more information, see CreateService.

When AWS Cloud Map receives a DNS query for the specified DNS name, it returns the applicable value:

  • If the health check is healthy: returns all the records

  • If the health check is unhealthy: returns the applicable value for the last healthy instance

  • If you didn't specify a health check configuration: returns all the records

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + "documentation":"

Creates or updates one or more records and, optionally, creates a health check based on the settings in a specified service. When you submit a RegisterInstance request, the following occurs:

  • For each DNS record that you define in the service that is specified by ServiceId, a record is created or updated in the hosted zone that is associated with the corresponding namespace.

  • If the service includes HealthCheckConfig, a health check is created based on the settings in the health check configuration.

  • The health check, if any, is associated with each of the new or updated records.

One RegisterInstance request must complete before you can submit another request and specify the same service ID and instance ID.

For more information, see CreateService.

When AWS Cloud Map receives a DNS query for the specified DNS name, it returns the applicable value:

  • If the health check is healthy: returns all the records

  • If the health check is unhealthy: returns the applicable value for the last healthy instance

  • If you didn't specify a health check configuration: returns all the records

For the current limit on the number of instances that you can register using the same namespace and using the same service, see AWS Cloud Map Limits in the AWS Cloud Map Developer Guide.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Adds one or more tags to the specified resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidInput"} + ], + "documentation":"

Removes one or more tags from the specified resource.

" }, "UpdateInstanceCustomHealthStatus":{ "name":"UpdateInstanceCustomHealthStatus", @@ -296,7 +344,7 @@ {"shape":"CustomHealthNotFound"}, {"shape":"InvalidInput"} ], - "documentation":"

Submits a request to change the health status of a custom health check to healthy or unhealthy.

You can use UpdateInstanceCustomHealthStatus to change the status only for custom health checks, which you define using HealthCheckCustomConfig when you create a service. You can't use it to change the status for Route 53 health checks, which you define using HealthCheckConfig.

For more information, see HealthCheckCustomConfig.

" + "documentation":"

Submits a request to change the health status of a custom health check to healthy or unhealthy.

You can use UpdateInstanceCustomHealthStatus to change the status only for custom health checks, which you define using HealthCheckCustomConfig when you create a service. You can't use it to change the status for Route 53 health checks, which you define using HealthCheckConfig.

For more information, see HealthCheckCustomConfig.

" }, "UpdateService":{ "name":"UpdateService", @@ -311,21 +359,28 @@ {"shape":"InvalidInput"}, {"shape":"ServiceNotFound"} ], - "documentation":"

Submits a request to perform the following operations:

  • Add or delete DnsRecords configurations

  • Update the TTL setting for existing DnsRecords configurations

  • Add, update, or delete HealthCheckConfig for a specified service

For public and private DNS namespaces, you must specify all DnsRecords configurations (and, optionally, HealthCheckConfig) that you want to appear in the updated service. Any current configurations that don't appear in an UpdateService request are deleted.

When you update the TTL setting for a service, AWS Cloud Map also updates the corresponding settings in all the records and health checks that were created by using the specified service.

" + "documentation":"

Submits a request to perform the following operations:

  • Update the TTL setting for existing DnsRecords configurations

  • Add, update, or delete HealthCheckConfig for a specified service

    You can't add, update, or delete a HealthCheckCustomConfig configuration.

For public and private DNS namespaces, note the following:

  • If you omit any existing DnsRecords or HealthCheckConfig configurations from an UpdateService request, the configurations are deleted from the service.

  • If you omit an existing HealthCheckCustomConfig configuration from an UpdateService request, the configuration is not deleted from the service.

When you update settings for a service, AWS Cloud Map also updates the corresponding settings in all the records and health checks that were created by using the specified service.

" } }, "shapes":{ + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1 + }, "Arn":{ "type":"string", "max":255 }, "AttrKey":{ "type":"string", - "max":255 + "max":255, + "pattern":"^[a-zA-Z0-9!-~]+$" }, "AttrValue":{ "type":"string", - "max":1024 + "max":1024, + "pattern":"^([a-zA-Z0-9!-~][ \\ta-zA-Z0-9!-~]*){0,1}[a-zA-Z0-9!-~]{0,1}$" }, "Attributes":{ "type":"map", @@ -349,6 +404,10 @@ "Description":{ "shape":"ResourceDescription", "documentation":"

A description for the namespace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -357,7 +416,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } }, @@ -384,6 +443,10 @@ "Vpc":{ "shape":"ResourceId", "documentation":"

The ID of the Amazon VPC that you want to associate the namespace with.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -392,7 +455,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } }, @@ -412,6 +475,10 @@ "Description":{ "shape":"ResourceDescription", "documentation":"

A description for the namespace.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the namespace. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -420,7 +487,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } }, @@ -430,7 +497,7 @@ "members":{ "Name":{ "shape":"ServiceName", - "documentation":"

The name that you want to assign to the service.

" + "documentation":"

The name that you want to assign to the service.

If you want AWS Cloud Map to create an SRV record when you register an instance, and if you're using a system that requires a specific SRV format, such as HAProxy, specify the following for Name:

  • Start the name with an underscore (_), such as _exampleservice

  • End the name with ._protocol, such as ._tcp

When you register an instance, AWS Cloud Map creates an SRV record and assigns a name to the record by concatenating the service name and the namespace name, for example:

_exampleservice._tcp.example.com

" }, "NamespaceId":{ "shape":"ResourceId", @@ -451,11 +518,15 @@ }, "HealthCheckConfig":{ "shape":"HealthCheckConfig", - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional Route 53 health check. If you specify settings for a health check, AWS Cloud Map associates the health check with all the Route 53 DNS records that you specify in DnsConfig.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

For information about the charges for health checks, see AWS Cloud Map Pricing.

" + "documentation":"

Public DNS and HTTP namespaces only. A complex type that contains settings for an optional Route 53 health check. If you specify settings for a health check, AWS Cloud Map associates the health check with all the Route 53 DNS records that you specify in DnsConfig.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

For information about the charges for health checks, see AWS Cloud Map Pricing.

" }, "HealthCheckCustomConfig":{ "shape":"HealthCheckCustomConfig", - "documentation":"

A complex type that contains information about an optional custom health check.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

" + "documentation":"

A complex type that contains information about an optional custom health check.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

You can't add, update, or delete a HealthCheckCustomConfig configuration from an existing service.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the service. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.

" } } }, @@ -498,7 +569,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } }, @@ -530,7 +601,7 @@ }, "InstanceId":{ "shape":"ResourceId", - "documentation":"

The value that you specified for Id in the RegisterInstance request.

" + "documentation":"

The value that you specified for Id in the RegisterInstance request.

" } } }, @@ -539,7 +610,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. For more information, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. For more information, see GetOperation.

" } } }, @@ -559,8 +630,8 @@ "documentation":"

The name of the service that you specified when you registered the instance.

" }, "MaxResults":{ - "shape":"MaxResults", - "documentation":"

The maximum number of instances that you want Cloud Map to return in the response to a DiscoverInstances request. If you don't specify a value for MaxResults, Cloud Map returns up to 100 instances.

" + "shape":"DiscoverMaxResults", + "documentation":"

The maximum number of instances that you want AWS Cloud Map to return in the response to a DiscoverInstances request. If you don't specify a value for MaxResults, AWS Cloud Map returns up to 100 instances.

" }, "QueryParameters":{ "shape":"Attributes", @@ -581,6 +652,11 @@ } } }, + "DiscoverMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, "DnsConfig":{ "type":"structure", "required":["DnsRecords"], @@ -593,7 +669,7 @@ }, "RoutingPolicy":{ "shape":"RoutingPolicy", - "documentation":"

The routing policy that you want to apply to all Route 53 DNS records that AWS Cloud Map creates when you register an instance and specify this service.

If you want to use this service to register instances that create alias records, specify WEIGHTED for the routing policy.

You can specify the following values:

MULTIVALUE

If you define a health check for the service and the health check is healthy, Route 53 returns the applicable value for up to eight instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with IP addresses for up to eight healthy instances. If fewer than eight instances are healthy, Route 53 responds to every DNS query with the IP addresses for all of the healthy instances.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the values for up to eight instances.

For more information about the multivalue routing policy, see Multivalue Answer Routing in the Route 53 Developer Guide.

WEIGHTED

Route 53 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. If no instances are healthy, Route 53 responds to DNS queries as if all of the instances were healthy.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.

For more information about the weighted routing policy, see Weighted Routing in the Route 53 Developer Guide.

" + "documentation":"

The routing policy that you want to apply to all Route 53 DNS records that AWS Cloud Map creates when you register an instance and specify this service.

If you want to use this service to register instances that create alias records, specify WEIGHTED for the routing policy.

You can specify the following values:

MULTIVALUE

If you define a health check for the service and the health check is healthy, Route 53 returns the applicable value for up to eight instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with IP addresses for up to eight healthy instances. If fewer than eight instances are healthy, Route 53 responds to every DNS query with the IP addresses for all of the healthy instances.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the values for up to eight instances.

For more information about the multivalue routing policy, see Multivalue Answer Routing in the Route 53 Developer Guide.

WEIGHTED

Route 53 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.

For example, suppose the service includes configurations for one A record and a health check, and you use the service to register 10 instances. Route 53 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. If no instances are healthy, Route 53 responds to DNS queries as if all of the instances were healthy.

If you don't define a health check for the service, Route 53 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.

For more information about the weighted routing policy, see Weighted Routing in the Route 53 Developer Guide.

" }, "DnsRecords":{ "shape":"DnsRecordList", @@ -632,11 +708,11 @@ "members":{ "Type":{ "shape":"RecordType", - "documentation":"

The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries.

Note the following:

  • A, AAAA, and SRV records: You can specify settings for a maximum of one A, one AAAA, and one SRV record. You can specify them in any combination.

  • CNAME records: If you specify CNAME for Type, you can't define any other records. This is a limitation of DNS: you can't create a CNAME record and any other type of record that has the same name as a CNAME record.

  • Alias records: If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type.

  • All records: You specify settings other than TTL and Type when you register an instance.

The following values are supported:

A

Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.

AAAA

Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.

CNAME

Route 53 returns the domain name of the resource, such as www.example.com. Note the following:

  • You specify the domain name that you want to route traffic to when you register an instance. For more information, see RegisterInstanceRequest$Attributes.

  • You must specify WEIGHTED for the value of RoutingPolicy.

  • You can't specify both CNAME for Type and settings for HealthCheckConfig. If you do, the request will fail with an InvalidInput error.

SRV

Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:

priority weight port service-hostname

Note the following about the values:

  • The values of priority and weight are both set to 1 and can't be changed.

  • The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.

  • The value of service-hostname is a concatenation of the following values:

    • The value that you specify for InstanceId when you register an instance.

    • The name of the service.

    • The name of the namespace.

    For example, if the value of InstanceId is test, the name of the service is backend, and the name of the namespace is example.com, the value of service-hostname is:

    test.backend.example.com

If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.

" + "documentation":"

The type of the resource, which indicates the type of value that Route 53 returns in response to DNS queries. You can specify values for Type in the following combinations:

  • A

  • AAAA

  • A and AAAA

  • SRV

  • CNAME

If you want AWS Cloud Map to create a Route 53 alias record when you register an instance, specify A or AAAA for Type.

You specify other settings, such as the IP address for A and AAAA records, when you register an instance. For more information, see RegisterInstance.

The following values are supported:

A

Route 53 returns the IP address of the resource in IPv4 format, such as 192.0.2.44.

AAAA

Route 53 returns the IP address of the resource in IPv6 format, such as 2001:0db8:85a3:0000:0000:abcd:0001:2345.

CNAME

Route 53 returns the domain name of the resource, such as www.example.com. Note the following:

  • You specify the domain name that you want to route traffic to when you register an instance. For more information, see Attributes in the topic RegisterInstance.

  • You must specify WEIGHTED for the value of RoutingPolicy.

  • You can't specify both CNAME for Type and settings for HealthCheckConfig. If you do, the request will fail with an InvalidInput error.

SRV

Route 53 returns the value for an SRV record. The value for an SRV record uses the following values:

priority weight port service-hostname

Note the following about the values:

  • The values of priority and weight are both set to 1 and can't be changed.

  • The value of port comes from the value that you specify for the AWS_INSTANCE_PORT attribute when you submit a RegisterInstance request.

  • The value of service-hostname is a concatenation of the following values:

    • The value that you specify for InstanceId when you register an instance.

    • The name of the service.

    • The name of the namespace.

    For example, if the value of InstanceId is test, the name of the service is backend, and the name of the namespace is example.com, the value of service-hostname is:

    test.backend.example.com

If you specify settings for an SRV record, note the following:

  • If you specify values for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both in the RegisterInstance request, AWS Cloud Map automatically creates A and/or AAAA records that have the same name as the value of service-hostname in the SRV record. You can ignore these records.

  • If you're using a system that requires a specific SRV format, such as HAProxy, see the Name element in the documentation about CreateService for information about how to specify the correct name format.

" }, "TTL":{ "shape":"RecordTTL", - "documentation":"

The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.

Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.

" + "documentation":"

The amount of time, in seconds, that you want DNS resolvers to cache the settings for this record.

Alias records don't include a TTL because Route 53 uses the TTL for the AWS resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME attribute when you submit a RegisterInstance request, the TTL value is ignored. Always specify a TTL for the service; you can use a service to register instances that create either alias or non-alias records.

" } }, "documentation":"

A complex type that contains information about the Route 53 DNS records that you want AWS Cloud Map to create when you register an instance.

" @@ -716,7 +792,7 @@ }, "Instances":{ "shape":"InstanceIdList", - "documentation":"

An array that contains the IDs of all the instances that you want to get the health status for.

If you omit Instances, AWS Cloud Map returns the health status for all the instances that are associated with the specified service.

To get the IDs for the instances that you've registered by using a specified service, submit a ListInstances request.

" + "documentation":"

An array that contains the IDs of all the instances that you want to get the health status for.

If you omit Instances, AWS Cloud Map returns the health status for all the instances that are associated with the specified service.

To get the IDs for the instances that you've registered by using a specified service, submit a ListInstances request.

" }, "MaxResults":{ "shape":"MaxResults", @@ -804,7 +880,7 @@ "members":{ "Type":{ "shape":"HealthCheckType", - "documentation":"

The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • TCP: Route 53 tries to establish a TCP connection.

    If you specify TCP for Type, don't specify a value for ResourcePath.

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" + "documentation":"

The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

  • HTTP: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTP request and waits for an HTTP status code of 200 or greater and less than 400.

  • HTTPS: Route 53 tries to establish a TCP connection. If successful, Route 53 submits an HTTPS request and waits for an HTTP status code of 200 or greater and less than 400.

    If you specify HTTPS for the value of Type, the endpoint must support TLS v1.0 or later.

  • TCP: Route 53 tries to establish a TCP connection.

    If you specify TCP for Type, don't specify a value for ResourcePath.

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" }, "ResourcePath":{ "shape":"ResourcePath", @@ -812,20 +888,20 @@ }, "FailureThreshold":{ "shape":"FailureThreshold", - "documentation":"

The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" + "documentation":"

The number of consecutive health checks that an endpoint must pass or fail for Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Route 53 Developer Guide.

" } }, - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing.

Note the following about configuring health checks.

A and AAAA records

If DnsConfig includes configurations for both A and AAAA records, AWS Cloud Map creates a health check that uses the IPv4 address to check the health of the resource. If the endpoint that is specified by the IPv4 address is unhealthy, Route 53 considers both the A and AAAA records to be unhealthy.

CNAME records

You can't specify settings for HealthCheckConfig when the DNSConfig includes CNAME for the value of Type. If you do, the CreateService request will fail with an InvalidInput error.

Request interval

A Route 53 health checker in each health-checking region sends a health check request to an endpoint every 30 seconds. On average, your endpoint receives a health check request about every two seconds. However, health checkers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

Health checking regions

Health checkers perform checks from all Route 53 health-checking regions. For a list of the current regions, see Regions.

Alias records

When you register an instance, if you include the AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. Note the following:

  • Route 53 automatically sets EvaluateTargetHealth to true for alias records. When EvaluateTargetHealth is true, the alias record inherits the health of the referenced AWS resource. such as an ELB load balancer. For more information, see EvaluateTargetHealth.

  • If you include HealthCheckConfig and then use the service to register an instance that creates an alias record, Route 53 doesn't create the health check.

Charges for health checks

Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing.

" + "documentation":"

Public DNS and HTTP namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing.

Note the following about configuring health checks.

A and AAAA records

If DnsConfig includes configurations for both A and AAAA records, AWS Cloud Map creates a health check that uses the IPv4 address to check the health of the resource. If the endpoint that is specified by the IPv4 address is unhealthy, Route 53 considers both the A and AAAA records to be unhealthy.

CNAME records

You can't specify settings for HealthCheckConfig when the DNSConfig includes CNAME for the value of Type. If you do, the CreateService request will fail with an InvalidInput error.

Request interval

A Route 53 health checker in each health-checking region sends a health check request to an endpoint every 30 seconds. On average, your endpoint receives a health check request about every two seconds. However, health checkers don't coordinate with one another, so you'll sometimes see several requests per second followed by a few seconds with no health checks at all.

Health checking regions

Health checkers perform checks from all Route 53 health-checking regions. For a list of the current regions, see Regions.

Alias records

When you register an instance, if you include the AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. Note the following:

  • Route 53 automatically sets EvaluateTargetHealth to true for alias records. When EvaluateTargetHealth is true, the alias record inherits the health of the referenced AWS resource, such as an ELB load balancer. For more information, see EvaluateTargetHealth.

  • If you include HealthCheckConfig and then use the service to register an instance that creates an alias record, Route 53 doesn't create the health check.

Charges for health checks

Health checks are basic Route 53 health checks that monitor an AWS endpoint. For information about pricing for health checks, see Amazon Route 53 Pricing.

" }, "HealthCheckCustomConfig":{ "type":"structure", "members":{ "FailureThreshold":{ "shape":"FailureThreshold", - "documentation":"

The number of 30-second intervals that you want Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance. For example, suppose you specify a value of 2 for FailureTheshold, and then your application sends an UpdateInstanceCustomHealthStatus request. Cloud Map waits for approximately 60 seconds (2 x 30) before changing the status of the service instance based on that request.

Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before FailureThreshold x 30 seconds has passed doesn't accelerate the change. Cloud Map still waits FailureThreshold x 30 seconds after the first request to make the change.

" + "documentation":"

The number of 30-second intervals that you want AWS Cloud Map to wait after receiving an UpdateInstanceCustomHealthStatus request before it changes the health status of a service instance. For example, suppose you specify a value of 2 for FailureThreshold, and then your application sends an UpdateInstanceCustomHealthStatus request. AWS Cloud Map waits for approximately 60 seconds (2 x 30) before changing the status of the service instance based on that request.

Sending a second or subsequent UpdateInstanceCustomHealthStatus request with the same value before FailureThreshold x 30 seconds has passed doesn't accelerate the change. AWS Cloud Map still waits FailureThreshold x 30 seconds after the first request to make the change.

" } }, - "documentation":"

A complex type that contains information about an optional custom health check. A custom health check, which requires that you use a third-party health checker to evaluate the health of your resources, is useful in the following circumstances:

  • You can't use a health check that is defined by HealthCheckConfig because the resource isn't available over the internet. For example, you can use a custom health check when the instance is in an Amazon VPC. (To check the health of resources in a VPC, the health checker must also be in the VPC.)

  • You want to use a third-party health checker regardless of where your resources are.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

To change the status of a custom health check, submit an UpdateInstanceCustomHealthStatus request. Cloud Map doesn't monitor the status of the resource, it just keeps a record of the status specified in the most recent UpdateInstanceCustomHealthStatus request.

Here's how custom health checks work:

  1. You create a service and specify a value for FailureThreshold.

    The failure threshold indicates the number of 30-second intervals you want AWS Cloud Map to wait between the time that your application sends an UpdateInstanceCustomHealthStatus request and the time that AWS Cloud Map stops routing internet traffic to the corresponding resource.

  2. You register an instance.

  3. You configure a third-party health checker to monitor the resource that is associated with the new instance.

    AWS Cloud Map doesn't check the health of the resource directly.

  4. The third-party health-checker determines that the resource is unhealthy and notifies your application.

  5. Your application submits an UpdateInstanceCustomHealthStatus request.

  6. AWS Cloud Map waits for (FailureThreshold x 30) seconds.

  7. If another UpdateInstanceCustomHealthStatus request doesn't arrive during that time to change the status back to healthy, AWS Cloud Map stops routing traffic to the resource.

Note the following about configuring custom health checks.

" + "documentation":"

A complex type that contains information about an optional custom health check. A custom health check, which requires that you use a third-party health checker to evaluate the health of your resources, is useful in the following circumstances:

  • You can't use a health check that is defined by HealthCheckConfig because the resource isn't available over the internet. For example, you can use a custom health check when the instance is in an Amazon VPC. (To check the health of resources in a VPC, the health checker must also be in the VPC.)

  • You want to use a third-party health checker regardless of where your resources are.

If you specify a health check configuration, you can specify either HealthCheckCustomConfig or HealthCheckConfig but not both.

To change the status of a custom health check, submit an UpdateInstanceCustomHealthStatus request. AWS Cloud Map doesn't monitor the status of the resource; it just keeps a record of the status specified in the most recent UpdateInstanceCustomHealthStatus request.

Here's how custom health checks work:

  1. You create a service and specify a value for FailureThreshold.

    The failure threshold indicates the number of 30-second intervals you want AWS Cloud Map to wait between the time that your application sends an UpdateInstanceCustomHealthStatus request and the time that AWS Cloud Map stops routing internet traffic to the corresponding resource.

  2. You register an instance.

  3. You configure a third-party health checker to monitor the resource that is associated with the new instance.

    AWS Cloud Map doesn't check the health of the resource directly.

  4. The third-party health-checker determines that the resource is unhealthy and notifies your application.

  5. Your application submits an UpdateInstanceCustomHealthStatus request.

  6. AWS Cloud Map waits for (FailureThreshold x 30) seconds.

  7. If another UpdateInstanceCustomHealthStatus request doesn't arrive during that time to change the status back to healthy, AWS Cloud Map stops routing traffic to the resource.

" }, "HealthCheckType":{ "type":"string", @@ -875,7 +951,7 @@ "documentation":"

If you included any attributes when you registered the instance, the values of those attributes.

" } }, - "documentation":"

In a response to a DiscoverInstance request, HttpInstanceSummary contains information about one instance that matches the values that you specified in the request.

" + "documentation":"

In a response to a DiscoverInstances request, HttpInstanceSummary contains information about one instance that matches the values that you specified in the request.

" }, "HttpInstanceSummaryList":{ "type":"list", @@ -897,7 +973,7 @@ "members":{ "Id":{ "shape":"ResourceId", - "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates the existing DNS records. If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" + "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord > Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates the existing DNS records. If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" }, "CreatorRequestId":{ "shape":"ResourceId", @@ -905,7 +981,7 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for an CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record when you created the service.

" + "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for a CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.

" } }, "documentation":"

A complex type that contains information about an instance that AWS Cloud Map creates when you submit a RegisterInstance request.

" @@ -1075,6 +1151,25 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to retrieve tags for.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that are assigned to the resource.

" + } + } + }, "MaxResults":{ "type":"integer", "max":100, @@ -1098,7 +1193,7 @@ }, "Type":{ "shape":"NamespaceType", - "documentation":"

The type of the namespace. Valid values are DNS_PUBLIC and DNS_PRIVATE.

" + "documentation":"

The type of the namespace. The methods for discovering instances depend on the value that you specify:

  • HTTP: Instances can be discovered only programmatically, using the AWS Cloud Map DiscoverInstances API.

  • DNS_PUBLIC: Instances can be discovered using public DNS queries and using the DiscoverInstances API.

  • DNS_PRIVATE: Instances can be discovered using DNS queries in VPCs and using the DiscoverInstances API.

" }, "Description":{ "shape":"ResourceDescription", @@ -1353,7 +1448,7 @@ "documentation":"

The status of the operation. Values include the following:

  • SUBMITTED: This is the initial state immediately after you submit a request.

  • PENDING: AWS Cloud Map is performing the operation.

  • SUCCESS: The operation succeeded.

  • FAIL: The operation failed. For the failure reason, see ErrorMessage.

" } }, - "documentation":"

A complex type that contains information about an operation that matches the criteria that you specified in a ListOperations request.

" + "documentation":"

A complex type that contains information about an operation that matches the criteria that you specified in a ListOperations request.

" }, "OperationSummaryList":{ "type":"list", @@ -1410,7 +1505,7 @@ }, "InstanceId":{ "shape":"ResourceId", - "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord$Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates the existing DNS records, if any. If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" + "documentation":"

An identifier that you want to associate with the instance. Note the following:

  • If the service that is specified by ServiceId includes settings for an SRV record, the value of InstanceId is automatically included as part of the value for the SRV record. For more information, see DnsRecord > Type.

  • You can use this value to update an existing instance.

  • To register a new instance, you must specify a value that is unique among instances that you register by using the same service.

  • If you specify an existing InstanceId and ServiceId, AWS Cloud Map updates the existing DNS records, if any. If there's also an existing health check, AWS Cloud Map deletes the old health check and creates a new one.

    The health check isn't deleted immediately, so it will still appear for a while if you submit a ListHealthChecks request, for example.

" }, "CreatorRequestId":{ "shape":"ResourceId", @@ -1419,7 +1514,7 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the Route 53 health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INIT_HEALTH_STATUS

If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for an CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record when you created the service.

Custom attributes

You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters.

" + "documentation":"

A string map that contains the following information for the service that you specify in ServiceId:

  • The attributes that apply to the records that are defined in the service.

  • For each attribute, the applicable value.

Supported attribute keys include the following:

AWS_ALIAS_DNS_NAME

If you want AWS Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that is associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.

Note the following:

  • The configuration for the service that is specified by ServiceId must include settings for an A record, an AAAA record, or both.

  • In the service that is specified by ServiceId, the value of RoutingPolicy must be WEIGHTED.

  • If the service that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the Route 53 health check, but it won't associate the health check with the alias record.

  • Auto naming currently doesn't support creating alias records that route traffic to AWS resources other than ELB load balancers.

  • If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.

AWS_INIT_HEALTH_STATUS

If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.

AWS_INSTANCE_CNAME

If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries, for example, example.com.

This value is required if the service specified by ServiceId includes settings for a CNAME record.

AWS_INSTANCE_IPV4

If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries, for example, 192.0.2.44.

This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_IPV6

If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345.

This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.

AWS_INSTANCE_PORT

If the service includes an SRV record, the value that you want Route 53 to return for the port.

If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to.

This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.

Custom attributes

You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters. Total size of all provided attributes (sum of all keys and values) must not exceed 5,000 characters.

" } } }, @@ -1428,10 +1523,18 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } }, + "RequestLimitExceeded":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation can't be completed because you've reached the limit on the number of requests.

", + "exception":true + }, "ResourceCount":{"type":"integer"}, "ResourceDescription":{ "type":"string", @@ -1457,6 +1560,14 @@ "documentation":"

The resource can't be created because you've reached the limit on the number of resources.

", "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The operation can't be completed because the resource was not found.

", + "exception":true + }, "ResourcePath":{ "type":"string", "max":255 @@ -1493,7 +1604,7 @@ }, "InstanceCount":{ "shape":"ResourceCount", - "documentation":"

The number of instances that are currently associated with the service. Instances that were previously associated with the service but that have been deleted are not included in the count.

" + "documentation":"

The number of instances that are currently associated with the service. Instances that were previously associated with the service but that have been deleted are not included in the count. The count might not reflect pending registrations and deregistrations.

" }, "DnsConfig":{ "shape":"DnsConfig", @@ -1501,7 +1612,7 @@ }, "HealthCheckConfig":{ "shape":"HealthCheckConfig", - "documentation":"

Public DNS namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" + "documentation":"

Public DNS and HTTP namespaces only. A complex type that contains settings for an optional health check. If you specify settings for a health check, AWS Cloud Map associates the health check with the records that you specify in DnsConfig.

For information about the charges for health checks, see Amazon Route 53 Pricing.

" }, "HealthCheckCustomConfig":{ "shape":"HealthCheckCustomConfig", @@ -1536,7 +1647,6 @@ }, "ServiceChange":{ "type":"structure", - "required":["DnsConfig"], "members":{ "Description":{ "shape":"ResourceDescription", @@ -1617,7 +1727,7 @@ }, "InstanceCount":{ "shape":"ResourceCount", - "documentation":"

The number of instances that are currently associated with the service. Instances that were previously associated with the service but that have been deleted are not included in the count.

" + "documentation":"

The number of instances that are currently associated with the service. Instances that were previously associated with the service but that have been deleted are not included in the count. The count might not reflect pending registrations and deregistrations.

" }, "DnsConfig":{"shape":"DnsConfig"}, "HealthCheckConfig":{"shape":"HealthCheckConfig"}, @@ -1629,7 +1739,103 @@ }, "documentation":"

A complex type that contains information about a specified service.

" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key identifier, or name, of the tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The string value that's associated with the key of the tag. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

" + } + }, + "documentation":"

A custom key-value pair associated with a resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to add tags to.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to add to the specified resource. Specifying the tag key is required. You can set the value of a tag to an empty string, but you can't set the value of a tag to null.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ResourceName":{ + "shape":"AmazonResourceName", + "documentation":"

The name of the resource.

" + } + }, + "documentation":"

The list of tags on the resource is over the limit. The maximum number of tags that can be applied to a resource is 50.

", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the resource that you want to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys to remove from the specified resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateInstanceCustomHealthStatusRequest":{ "type":"structure", "required":[ @@ -1674,7 +1880,7 @@ "members":{ "OperationId":{ "shape":"OperationId", - "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" + "documentation":"

A value that you can use to determine whether the request completed successfully. To get the status of the operation, see GetOperation.

" } } } diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 1dec9d073d4f..3ffbb93ed828 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index a137822cb4d0..2228de8df150 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index bd2481fa4f1b..793d817c874c 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index 9a5139e977a0..803bdfa13481 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -1612,7 +1612,7 @@ "documentation":"

An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

" } }, - "documentation":"

In the Amazon SES API v2, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" + "documentation":"

In the Amazon SES API v2, events include message sends, deliveries, opens, clicks, bounces, complaints, and delivery delays. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

" }, "EventDestinationDefinition":{ "type":"structure", @@ -1663,7 +1663,8 @@ "DELIVERY", "OPEN", "CLICK", - "RENDERING_FAILURE" + "RENDERING_FAILURE", + "DELIVERY_DELAY" ] }, "EventTypes":{ diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 2702aa105da5..fa08be0f06e7 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index b25d1cfb0a6e..6da6c0cff279 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/paginators-1.json b/services/shield/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..cffb14b68194 100644 --- a/services/shield/src/main/resources/codegen-resources/paginators-1.json +++ b/services/shield/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,16 @@ { "pagination": { + "ListAttacks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "AttackSummaries" + }, + "ListProtections": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Protections" + } } -} +} \ No newline at end of file diff --git a/services/shield/src/main/resources/codegen-resources/service-2.json b/services/shield/src/main/resources/codegen-resources/service-2.json index c9226f462c28..a37e6df3f89e 100644 --- a/services/shield/src/main/resources/codegen-resources/service-2.json +++ b/services/shield/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the DDoS Response Team (DRT) to access the specified Amazon S3 bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 buckets with your subscription.

To use the services of the DRT and make an AssociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateDRTRole":{ "name":"AssociateDRTRole", @@ -49,7 +49,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Authorizes the DDoS Response team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" + "documentation":"

Authorizes the DDoS Response Team (DRT), using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the DRT to inspect your AWS WAF configuration and create or update AWS WAF rules and web ACLs.

You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.

Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to the role you will specify in the request. For more information see Attaching and Detaching IAM Policies. The role must also trust the service principal drt.shield.amazonaws.com. For more information, see IAM JSON Policy Elements: Principal.

The DRT will have access only to your AWS WAF and Shield resources. By submitting this request, you authorize the DRT to inspect your AWS WAF and Shield configuration and create and update AWS WAF rules and web ACLs on your behalf. The DRT takes these actions only if explicitly authorized by you.

You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

To use the services of the DRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.

" }, "AssociateHealthCheck":{ "name":"AssociateHealthCheck", @@ -68,6 +68,23 @@ ], "documentation":"

Adds health-based detection to the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" }, + "AssociateProactiveEngagementDetails":{ + "name":"AssociateProactiveEngagementDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateProactiveEngagementDetailsRequest"}, + "output":{"shape":"AssociateProactiveEngagementDetailsResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Initializes proactive engagement and sets the list of contacts for the DDoS Response Team (DRT) to use. You must provide at least one phone number in the emergency contact list.

After you have initialized proactive engagement using this call, to disable or enable proactive engagement, use the calls DisableProactiveEngagement and EnableProactiveEngagement.

This call defines the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

The contacts that you provide in the request replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it to this call.

" + }, "CreateProtection":{ "name":"CreateProtection", "http":{ @@ -99,7 +116,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceAlreadyExistsException"} ], - "documentation":"

Activates AWS Shield Advanced for an account.

As part of this request you can specify EmergencySettings that automaticaly grant the DDoS response team (DRT) needed permissions to assist you during a suspected DDoS attack. For more information see Authorize the DDoS Response Team to Create Rules and Web ACLs on Your Behalf.

To use the services of the DRT, you must be subscribed to the Business Support plan or the Enterprise Support plan.

When you initally create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" + "documentation":"

Activates AWS Shield Advanced for an account.

When you initially create a subscription, your subscription is set to be automatically renewed at the end of the existing subscription period. You can change this by submitting an UpdateSubscription request.

" }, "DeleteProtection":{ "name":"DeleteProtection", @@ -158,7 +175,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response team (DRT) to access your AWS account while assisting with attack mitigation.

" + "documentation":"

Returns the current role and list of Amazon S3 log buckets used by the DDoS Response Team (DRT) to access your AWS account while assisting with attack mitigation.

" }, "DescribeEmergencyContactSettings":{ "name":"DescribeEmergencyContactSettings", @@ -172,7 +189,7 @@ {"shape":"InternalErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "DescribeProtection":{ "name":"DescribeProtection", @@ -203,6 +220,23 @@ ], "documentation":"

Provides details about the AWS Shield Advanced subscription for an account.

" }, + "DisableProactiveEngagement":{ + "name":"DisableProactiveEngagement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableProactiveEngagementRequest"}, + "output":{"shape":"DisableProactiveEngagementResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Removes authorization from the DDoS Response Team (DRT) to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + }, "DisassociateDRTLogBucket":{ "name":"DisassociateDRTLogBucket", "http":{ @@ -219,7 +253,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" + "documentation":"

Removes the DDoS Response Team's (DRT) access to the specified Amazon S3 bucket containing your AWS WAF logs.

To make a DisassociateDRTLogBucket request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTLogBucket request to remove this access.

" }, "DisassociateDRTRole":{ "name":"DisassociateDRTRole", @@ -235,7 +269,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes the DDoS Response team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" + "documentation":"

Removes the DDoS Response Team's (DRT) access to your AWS account.

To make a DisassociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan. However, if you are not subscribed to one of these support plans, but had been previously and had granted the DRT access to your account, you can submit a DisassociateDRTRole request to remove this access.

" }, "DisassociateHealthCheck":{ "name":"DisassociateHealthCheck", @@ -253,6 +287,23 @@ ], "documentation":"

Removes health-based detection from the Shield Advanced protection for a resource. Shield Advanced health-based detection uses the health of your AWS resource to improve responsiveness and accuracy in attack detection and mitigation.

You define the health check in Route 53 and then associate or disassociate it with your Shield Advanced protection. For more information, see Shield Advanced Health-Based Detection in the AWS WAF and AWS Shield Developer Guide.

" }, + "EnableProactiveEngagement":{ + "name":"EnableProactiveEngagement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableProactiveEngagementRequest"}, + "output":{"shape":"EnableProactiveEngagementResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OptimisticLockException"} + ], + "documentation":"

Authorizes the DDoS Response Team (DRT) to use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

" + }, "GetSubscriptionState":{ "name":"GetSubscriptionState", "http":{ @@ -310,7 +361,7 @@ {"shape":"OptimisticLockException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the details of the list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

Updates the details of the list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "UpdateSubscription":{ "name":"UpdateSubscription", @@ -344,7 +395,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

In order to grant the necessary access to the DDoS Response Team, the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", + "documentation":"

In order to grant the necessary access to the DDoS Response Team (DRT), the user submitting the request must have the iam:PassRole permission. This error indicates the user did not have the appropriate permissions. For more information, see Granting a User Permissions to Pass a Role to an AWS Service.

", "exception":true }, "AssociateDRTLogBucketRequest":{ @@ -399,6 +450,21 @@ "members":{ } }, + "AssociateProactiveEngagementDetailsRequest":{ + "type":"structure", + "required":["EmergencyContactList"], + "members":{ + "EmergencyContactList":{ + "shape":"EmergencyContactList", + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you for escalations to the DRT and to initiate proactive customer support.

To enable proactive engagement, the contact list must include at least one phone number.

The contacts that you provide here replace any contacts that were already defined. If you already have contacts defined and want to use them, retrieve the list using DescribeEmergencyContactSettings and then provide it here.

" + } + } + }, + "AssociateProactiveEngagementDetailsResponse":{ + "type":"structure", + "members":{ + } + }, "AttackDetail":{ "type":"structure", "members":{ @@ -546,6 +612,12 @@ "DISABLED" ] }, + "ContactNotes":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[\\w\\s\\.\\-,:/()+@]*$" + }, "Contributor":{ "type":"structure", "members":{ @@ -670,7 +742,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" } } }, @@ -710,6 +782,16 @@ } } }, + "DisableProactiveEngagementRequest":{ + "type":"structure", + "members":{ + } + }, + "DisableProactiveEngagementResponse":{ + "type":"structure", + "members":{ + } + }, "DisassociateDRTLogBucketRequest":{ "type":"structure", "required":["LogBucket"], @@ -774,10 +856,18 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

An email address that the DRT can use to contact you during a suspected attack.

" + "documentation":"

The email address for the contact.

" + }, + "PhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

The phone number for the contact.

" + }, + "ContactNotes":{ + "shape":"ContactNotes", + "documentation":"

Additional notes regarding the contact.

" } }, - "documentation":"

Contact information that the DRT can use to contact you during a suspected attack.

" + "documentation":"

Contact information that the DRT can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

" }, "EmergencyContactList":{ "type":"list", @@ -785,6 +875,16 @@ "max":10, "min":0 }, + "EnableProactiveEngagementRequest":{ + "type":"structure", + "members":{ + } + }, + "EnableProactiveEngagementResponse":{ + "type":"structure", + "members":{ + } + }, "GetSubscriptionStateRequest":{ "type":"structure", "members":{ @@ -1001,9 +1101,23 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"

Exception that indicates that the protection state has been modified by another client. You can retry the request.

", + "documentation":"

Exception that indicates that the resource state has been modified by another client. Retrieve the resource and then retry your request.

", "exception":true }, + "PhoneNumber":{ + "type":"string", + "max":16, + "min":1, + "pattern":"^\\+[1-9]\\d{1,14}$" + }, + "ProactiveEngagementStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "PENDING" + ] + }, "Protection":{ "type":"structure", "members":{ @@ -1130,6 +1244,10 @@ "Limits":{ "shape":"Limits", "documentation":"

Specifies how many protections of a given type you can create.

" + }, + "ProactiveEngagementStatus":{ + "shape":"ProactiveEngagementStatus", + "documentation":"

If ENABLED, the DDoS Response Team (DRT) will use email and phone to notify contacts about escalations to the DRT and to initiate proactive customer support.

If PENDING, you have requested proactive engagement and the request is pending. The status changes to ENABLED when your request is fully processed.

If DISABLED, the DRT will not proactively notify contacts about escalations or to initiate proactive customer support.

" } }, "documentation":"

Information about the AWS Shield Advanced subscription for an account.

" @@ -1233,7 +1351,7 @@ "members":{ "EmergencyContactList":{ "shape":"EmergencyContactList", - "documentation":"

A list of email addresses that the DRT can use to contact you during a suspected attack.

" + "documentation":"

A list of email addresses and phone numbers that the DDoS Response Team (DRT) can use to contact you if you have proactive engagement enabled, for escalations to the DRT and to initiate proactive customer support.

If you have proactive engagement enabled, the contact list must include at least one phone number.

" } } }, diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 4a9ef1843054..b863df3c72f1 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 6e3084a6fc24..33af15e3c40a 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 31e5442ce5e8..f10127d20ede 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowball/src/main/resources/codegen-resources/service-2.json b/services/snowball/src/main/resources/codegen-resources/service-2.json index 1063cd63014c..8f1fedb95893 100644 --- a/services/snowball/src/main/resources/codegen-resources/service-2.json +++ b/services/snowball/src/main/resources/codegen-resources/service-2.json @@ -464,7 +464,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this cluster.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

" }, "CreationDate":{ "shape":"Timestamp", @@ -580,11 +580,11 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this cluster. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this cluster.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

" }, "ShippingOption":{ "shape":"ShippingOption", - "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

  • In Australia, you have access to express shipping. Typically, devices shipped express are delivered in about a day.

  • In the European Union (EU), you have access to express shipping. Typically, Snowball Edges shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.

  • In India, Snowball Edges are delivered in one to seven days.

  • In the US, you have access to one-day shipping and two-day shipping.

" + "documentation":"

The shipping speed for each node in this cluster. This speed doesn't dictate how soon you'll get each Snowball Edge device, rather it represents how quickly each device moves to its destination while in transit. Regional shipping speeds are as follows:

  • In Australia, you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day.

  • In the European Union (EU), you have access to express shipping. Typically, Snowballs shipped express are delivered in about a day. In addition, most countries in the EU have access to standard shipping, which typically takes less than a week, one way.

  • In India, Snowballs are delivered in one to seven days.

  • In the United States of America (US), you have access to one-day shipping and two-day shipping.

" }, "Notification":{ "shape":"Notification", @@ -654,7 +654,7 @@ }, "SnowballType":{ "shape":"SnowballType", - "documentation":"

The type of AWS Snowball device to use for this job. Currently, the only supported device type for cluster jobs is EDGE.

For more information, see Snowball Edge Device Options in the Snowball Edge Developer Guide.

" + "documentation":"

The type of AWS Snowball device to use for this job.

For cluster jobs, AWS Snowball currently supports only the EDGE device type.

" }, "ForwardingAddressId":{ "shape":"AddressId", @@ -663,6 +663,10 @@ "TaxDocuments":{ "shape":"TaxDocuments", "documentation":"

The tax documents required in your AWS Region.

" + }, + "DeviceConfiguration":{ + "shape":"DeviceConfiguration", + "documentation":"

Defines the device configuration for an AWS Snowcone job.

" } } }, @@ -784,6 +788,16 @@ } } }, + "DeviceConfiguration":{ + "type":"structure", + "members":{ + "SnowconeDeviceConfiguration":{ + "shape":"SnowconeDeviceConfiguration", + "documentation":"

Returns information about the device configuration for an AWS Snowcone job.

" + } + }, + "documentation":"

The container for SnowconeDeviceConfiguration.

" + }, "Ec2AmiResource":{ "type":"structure", "required":["AmiId"], @@ -928,7 +942,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Job or cluster creation failed. One ore more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

", + "documentation":"

Job or cluster creation failed. One or more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

", "exception":true }, "InvalidJobStateException":{ @@ -1095,7 +1109,8 @@ "TaxDocuments":{ "shape":"TaxDocuments", "documentation":"

The metadata associated with the tax documents required in your AWS Region.

" - } + }, + "DeviceConfiguration":{"shape":"DeviceConfiguration"} }, "documentation":"

Contains information about a specific job including shipping information, job status, and other important metadata. This information is returned as a part of the response syntax of the DescribeJob action.

" }, @@ -1404,6 +1419,8 @@ "T80", "T100", "T42", + "T98", + "T8", "NoPreference" ] }, @@ -1413,9 +1430,21 @@ "STANDARD", "EDGE", "EDGE_C", - "EDGE_CG" + "EDGE_CG", + "EDGE_S", + "SNC1_HDD" ] }, + "SnowconeDeviceConfiguration":{ + "type":"structure", + "members":{ + "WirelessConnection":{ + "shape":"WirelessConnection", + "documentation":"

Configures the wireless connection for the AWS Snowcone device.

" + } + }, + "documentation":"

Specifies the device configuration for an AWS Snowcone job.

" + }, "SnsTopicARN":{ "type":"string", "max":255, @@ -1428,10 +1457,7 @@ "TaxDocuments":{ "type":"structure", "members":{ - "IND":{ - "shape":"INDTaxDocuments", - "documentation":"

The tax documents required in AWS Regions in India.

" - } + "IND":{"shape":"INDTaxDocuments"} }, "documentation":"

The tax documents required in your AWS Region.

" }, @@ -1533,6 +1559,16 @@ "type":"structure", "members":{ } + }, + "WirelessConnection":{ + "type":"structure", + "members":{ + "IsWifiEnabled":{ + "shape":"Boolean", + "documentation":"

Enables the Wi-Fi adapter on an AWS Snowcone device.

" + } + }, + "documentation":"

Configures the wireless connection on an AWS Snowcone device.

" } }, "documentation":"

AWS Snowball is a petabyte-scale data transport solution that uses secure devices to transfer large amounts of data between your on-premises data centers and Amazon Simple Storage Service (Amazon S3). The Snowball commands described here provide access to the same functionality that is available in the AWS Snowball Management Console, which enables you to create and manage jobs for Snowball. To transfer data locally with a Snowball device, you'll need to use the Snowball client or the Amazon S3 API adapter for Snowball. For more information, see the User Guide.

" diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 08630a0f427e..9755ab64bcb2 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sns/src/it/java/software/amazon/awssdk/services/sns/MobilePushIntegrationTest.java b/services/sns/src/it/java/software/amazon/awssdk/services/sns/MobilePushIntegrationTest.java deleted file mode 100644 index 054b4c4e6610..000000000000 --- a/services/sns/src/it/java/software/amazon/awssdk/services/sns/MobilePushIntegrationTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.sns; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.HashMap; -import java.util.Map; -import java.util.Random; -import org.junit.Test; -import software.amazon.awssdk.services.sns.model.CreatePlatformApplicationRequest; -import software.amazon.awssdk.services.sns.model.CreatePlatformApplicationResponse; -import software.amazon.awssdk.services.sns.model.CreatePlatformEndpointRequest; -import software.amazon.awssdk.services.sns.model.CreatePlatformEndpointResponse; -import software.amazon.awssdk.services.sns.model.CreateTopicRequest; -import software.amazon.awssdk.services.sns.model.CreateTopicResponse; -import software.amazon.awssdk.services.sns.model.DeleteEndpointRequest; -import software.amazon.awssdk.services.sns.model.DeletePlatformApplicationRequest; -import software.amazon.awssdk.services.sns.model.DeleteTopicRequest; -import software.amazon.awssdk.services.sns.model.Endpoint; -import software.amazon.awssdk.services.sns.model.GetEndpointAttributesRequest; -import software.amazon.awssdk.services.sns.model.GetEndpointAttributesResponse; -import software.amazon.awssdk.services.sns.model.GetPlatformApplicationAttributesRequest; -import software.amazon.awssdk.services.sns.model.GetPlatformApplicationAttributesResponse; -import software.amazon.awssdk.services.sns.model.ListEndpointsByPlatformApplicationRequest; -import software.amazon.awssdk.services.sns.model.ListEndpointsByPlatformApplicationResponse; -import software.amazon.awssdk.services.sns.model.ListPlatformApplicationsRequest; -import software.amazon.awssdk.services.sns.model.ListPlatformApplicationsResponse; -import software.amazon.awssdk.services.sns.model.PlatformApplication; -import software.amazon.awssdk.services.sns.model.PublishRequest; -import software.amazon.awssdk.services.sns.model.PublishResponse; 
-import software.amazon.awssdk.services.sns.model.SetEndpointAttributesRequest; -import software.amazon.awssdk.services.sns.model.SetPlatformApplicationAttributesRequest; - -public class MobilePushIntegrationTest extends IntegrationTestBase { - - private String platformAppName = "JavaSDKTestApp" + new Random().nextInt(); - private String platformCredential = "AIzaSyD-4pBAk6M7eveE9dwRFyGv-cfYBPiHRmk"; - private String token = - "APA91bHXHl9bxaaNvHHWNXKwzzaeAjJnBP3g6ieaGta1aPMgrilr0H-QL4AxUZUJ-1mk0gnLpmeXF0Kg7-9fBXfXHTKzPGlCyT6E6oOfpdwLpcRMxQp5vCPFiFeru9oQylc22HvZSwQTDgmmw9WdNlXMerUPzmoX0w"; - - /** - * Tests for mobile push API - * - */ - @Test - public void testMobilePushOperations() throws InterruptedException { - - String platformApplicationArn = null; - String endpointArn = null; - String topicArn = null; - - try { - CreateTopicResponse createTopicResult = sns.createTopic(CreateTopicRequest.builder().name("TestTopic").build()); - topicArn = createTopicResult.topicArn(); - - // List platform applications - ListPlatformApplicationsResponse listPlatformAppsResult = - sns.listPlatformApplications(ListPlatformApplicationsRequest.builder().build()); - int platformAppsCount = listPlatformAppsResult.platformApplications().size(); - for (PlatformApplication platformApp : listPlatformAppsResult.platformApplications()) { - assertNotNull(platformApp.platformApplicationArn()); - validateAttributes(platformApp.attributes()); - } - - // Create a platform application for GCM. 
- Map attributes = new HashMap<>(); - attributes.put("PlatformCredential", platformCredential); - attributes.put("PlatformPrincipal", "NA"); - attributes.put("EventEndpointCreated", topicArn); - attributes.put("EventEndpointDeleted", topicArn); - attributes.put("EventEndpointUpdated", topicArn); - attributes.put("EventDeliveryAttemptFailure", topicArn); - attributes.put("EventDeliveryFailure", ""); - CreatePlatformApplicationResponse createPlatformAppResult = sns - .createPlatformApplication(CreatePlatformApplicationRequest.builder().name(platformAppName) - .platform("GCM").attributes(attributes).build()); - assertNotNull(createPlatformAppResult.platformApplicationArn()); - platformApplicationArn = createPlatformAppResult.platformApplicationArn(); - - Thread.sleep(5 * 1000); - listPlatformAppsResult = sns.listPlatformApplications(ListPlatformApplicationsRequest.builder().build()); - assertEquals(platformAppsCount + 1, listPlatformAppsResult.platformApplications().size()); - - // Get attributes - GetPlatformApplicationAttributesResponse platformAttributesResult = sns.getPlatformApplicationAttributes( - GetPlatformApplicationAttributesRequest.builder().platformApplicationArn(platformApplicationArn).build()); - validateAttributes(platformAttributesResult.attributes()); - - // Set attributes - attributes.clear(); - attributes.put("EventDeliveryFailure", topicArn); - - sns.setPlatformApplicationAttributes(SetPlatformApplicationAttributesRequest.builder() - .platformApplicationArn( - platformApplicationArn) - .attributes(attributes).build()); - - Thread.sleep(1 * 1000); - // Verify attribute change - platformAttributesResult = sns.getPlatformApplicationAttributes( - GetPlatformApplicationAttributesRequest.builder().platformApplicationArn(platformApplicationArn).build()); - validateAttribute(platformAttributesResult.attributes(), "EventDeliveryFailure", topicArn); - - // Create platform endpoint - CreatePlatformEndpointResponse createPlatformEndpointResult = 
sns.createPlatformEndpoint( - CreatePlatformEndpointRequest.builder().platformApplicationArn(platformApplicationArn) - .customUserData("Custom Data").token(token).build()); - assertNotNull(createPlatformEndpointResult.endpointArn()); - endpointArn = createPlatformEndpointResult.endpointArn(); - - // List platform endpoints - Thread.sleep(5 * 1000); - ListEndpointsByPlatformApplicationResponse listEndpointsResult = sns.listEndpointsByPlatformApplication( - ListEndpointsByPlatformApplicationRequest.builder().platformApplicationArn(platformApplicationArn).build()); - assertTrue(listEndpointsResult.endpoints().size() == 1); - for (Endpoint endpoint : listEndpointsResult.endpoints()) { - assertNotNull(endpoint.endpointArn()); - validateAttributes(endpoint.attributes()); - } - - // Publish to the endpoint - PublishResponse publishResult = sns.publish(PublishRequest.builder().message("Mobile push test message") - .subject("Mobile Push test subject").targetArn(endpointArn) - .build()); - assertNotNull(publishResult.messageId()); - - // Get endpoint attributes - GetEndpointAttributesResponse endpointAttributesResult = sns - .getEndpointAttributes(GetEndpointAttributesRequest.builder().endpointArn(endpointArn).build()); - validateAttributes(endpointAttributesResult.attributes()); - - // Set endpoint attributes - attributes.clear(); - attributes.put("CustomUserData", "Updated Custom Data"); - sns.setEndpointAttributes( - SetEndpointAttributesRequest.builder().endpointArn(endpointArn).attributes(attributes).build()); - - Thread.sleep(1 * 1000); - // Validate set endpoint attributes - endpointAttributesResult = sns - .getEndpointAttributes(GetEndpointAttributesRequest.builder().endpointArn(endpointArn).build()); - validateAttribute(endpointAttributesResult.attributes(), "CustomUserData", "Updated Custom Data"); - - } finally { - if (platformApplicationArn != null) { - if (endpointArn != null) { - // Delete endpoint - 
sns.deleteEndpoint(DeleteEndpointRequest.builder().endpointArn(endpointArn).build()); - } - // Delete application platform - sns.deletePlatformApplication( - DeletePlatformApplicationRequest.builder().platformApplicationArn(platformApplicationArn).build()); - } - if (topicArn != null) { - // Delete the topic - sns.deleteTopic(DeleteTopicRequest.builder().topicArn(topicArn).build()); - } - } - } - - private void validateAttributes(Map attributes) { - for (Map.Entry attribute : attributes.entrySet()) { - assertNotNull(attribute.getKey()); - assertNotNull(attribute.getValue()); - } - } - - private void validateAttribute(Map attributes, String key, String expectedValue) { - if (attributes.containsKey(key)) { - if (attributes.get(key).equals(expectedValue)) { - return; - } - fail(String.format("The key %s didn't have the expected value %s. Actual value : %s ", key, expectedValue, - attributes.get(key))); - } - fail(String.format("The key %s wasn't present in the Map.", key)); - } -} diff --git a/services/sns/src/main/resources/codegen-resources/service-2.json b/services/sns/src/main/resources/codegen-resources/service-2.json index e470c03fa049..53276fa78285 100755 --- a/services/sns/src/main/resources/codegen-resources/service-2.json +++ b/services/sns/src/main/resources/codegen-resources/service-2.json @@ -83,7 +83,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and FCM, to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. The PlatformPrincipal is received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is \"SSL certificate\". For FCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\". The PlatformCredential is also received from the notification service. For WNS, PlatformPrincipal is \"Package Security Identifier\". For MPNS, PlatformPrincipal is \"TLS certificate\". For Baidu, PlatformPrincipal is \"API key\".

For APNS/APNS_SANDBOX, PlatformCredential is \"private key\". For FCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\". For WNS, PlatformCredential is \"secret key\". For MPNS, PlatformCredential is \"private key\". For Baidu, PlatformCredential is \"secret key\". The PlatformApplicationArn that is returned when using CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint action.

" + "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

  • For ADM, PlatformPrincipal is client id and PlatformCredential is client secret.

  • For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key.

  • For APNS and APNS_SANDBOX, PlatformPrincipal is SSL certificate and PlatformCredential is private key.

  • For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and the PlatformCredential is API key.

  • For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key.

  • For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

" }, "CreatePlatformEndpoint":{ "name":"CreatePlatformEndpoint", @@ -102,7 +102,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Creates an endpoint for a device and mobile app on one of the supported push notification services, such as FCM and APNS. CreatePlatformEndpoint requires the PlatformApplicationArn that is returned from CreatePlatformApplication. The EndpointArn that is returned when using CreatePlatformEndpoint can then be used by the Publish action to send a message to a mobile app or by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint. For more information, see Using Amazon SNS Mobile Push Notifications.

When using CreatePlatformEndpoint with Baidu, two attributes must be provided: ChannelId and UserId. The token field must also contain the ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu.

" + "documentation":"

Creates an endpoint for a device and mobile app on one of the supported push notification services, such as GCM (Firebase Cloud Messaging) and APNS. CreatePlatformEndpoint requires the PlatformApplicationArn that is returned from CreatePlatformApplication. You can use the returned EndpointArn with the Publish action to send a message to a mobile app, or with the Subscribe action to subscribe the endpoint to a topic. The CreatePlatformEndpoint action is idempotent, so if the requester already owns an endpoint with the same device token and attributes, that endpoint's ARN is returned without creating a new endpoint. For more information, see Using Amazon SNS Mobile Push Notifications.

When using CreatePlatformEndpoint with Baidu, two attributes must be provided: ChannelId and UserId. The token field must also contain the ChannelId. For more information, see Creating an Amazon SNS Endpoint for Baidu.

" }, "CreateTopic":{ "name":"CreateTopic", @@ -126,7 +126,7 @@ {"shape":"TagPolicyException"}, {"shape":"ConcurrentAccessException"} ], - "documentation":"

Creates a topic to which notifications can be published. Users can create at most 100,000 topics. For more information, see https://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

" + "documentation":"

Creates a topic to which notifications can be published. Users can create at most 100,000 standard topics (at most 1,000 FIFO topics). For more information, see https://aws.amazon.com/sns. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic.

" }, "DeleteEndpoint":{ "name":"DeleteEndpoint", @@ -154,7 +154,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Deletes a platform application object for one of the supported push notification services, such as APNS and FCM. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Deletes a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Amazon SNS Mobile Push Notifications.

" }, "DeleteTopic":{ "name":"DeleteTopic", @@ -191,7 +191,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves the endpoint attributes for a device on one of the supported push notification services, such as FCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Retrieves the endpoint attributes for a device on one of the supported push notification services, such as GCM (Firebase Cloud Messaging) and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

" }, "GetPlatformApplicationAttributes":{ "name":"GetPlatformApplicationAttributes", @@ -210,7 +210,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Retrieves the attributes of the platform application object for the supported push notification services, such as APNS and FCM. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Retrieves the attributes of the platform application object for the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Amazon SNS Mobile Push Notifications.

" }, "GetSMSAttributes":{ "name":"GetSMSAttributes", @@ -287,7 +287,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as FCM and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 30 transactions per second (TPS).

" + "documentation":"

Lists the endpoints and endpoint attributes for devices in a supported push notification service, such as GCM (Firebase Cloud Messaging) and APNS. The results for ListEndpointsByPlatformApplication are paginated and return a limited list of endpoints, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListEndpointsByPlatformApplication again using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 30 transactions per second (TPS).

" }, "ListPhoneNumbersOptedOut":{ "name":"ListPhoneNumbersOptedOut", @@ -324,7 +324,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Lists the platform application objects for the supported push notification services, such as APNS and FCM. The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 15 transactions per second (TPS).

" + "documentation":"

Lists the platform application objects for the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging). The results for ListPlatformApplications are paginated and return a limited list of applications, up to 100. If additional records are available after the first page results, then a NextToken string will be returned. To receive the next page, you call ListPlatformApplications using the NextToken string received from the previous call. When there are no more records to return, NextToken will be null. For more information, see Using Amazon SNS Mobile Push Notifications.

This action is throttled at 15 transactions per second (TPS).

" }, "ListSubscriptions":{ "name":"ListSubscriptions", @@ -447,7 +447,7 @@ {"shape":"KMSAccessDeniedException"}, {"shape":"InvalidSecurityException"} ], - "documentation":"

Sends a message to an Amazon SNS topic or sends a text message (SMS message) directly to a phone number.

If you send a message to a topic, Amazon SNS delivers the message to each endpoint that is subscribed to the topic. The format of the message depends on the notification protocol for each subscribed endpoint.

When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it shortly.

To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn for the TargetArn parameter. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action.

For more information about formatting messages, see Send Custom Platform-Specific Payloads in Messages to Mobile Devices.

" + "documentation":"

Sends a message to an Amazon SNS topic, a text message (SMS message) directly to a phone number, or a message to a mobile platform endpoint (when you specify the TargetArn).

If you send a message to a topic, Amazon SNS delivers the message to each endpoint that is subscribed to the topic. The format of the message depends on the notification protocol for each subscribed endpoint.

When a messageId is returned, the message has been saved and Amazon SNS will attempt to deliver it shortly.

To use the Publish action for sending a message to a mobile endpoint, such as an app on a Kindle device or mobile phone, you must specify the EndpointArn for the TargetArn parameter. The EndpointArn is returned when making a call with the CreatePlatformEndpoint action.

For more information about formatting messages, see Send Custom Platform-Specific Payloads in Messages to Mobile Devices.

You can publish messages only to topics and endpoints in the same AWS Region.

" }, "RemovePermission":{ "name":"RemovePermission", @@ -477,7 +477,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Sets the attributes for an endpoint for a device on one of the supported push notification services, such as FCM and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

" + "documentation":"

Sets the attributes for an endpoint for a device on one of the supported push notification services, such as GCM (Firebase Cloud Messaging) and APNS. For more information, see Using Amazon SNS Mobile Push Notifications.

" }, "SetPlatformApplicationAttributes":{ "name":"SetPlatformApplicationAttributes", @@ -492,7 +492,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"NotFoundException"} ], - "documentation":"

Sets the attributes of the platform application object for the supported push notification services, such as APNS and FCM. For more information, see Using Amazon SNS Mobile Push Notifications. For information on configuring attributes for message delivery status, see Using Amazon SNS Application Attributes for Message Delivery Status.

" + "documentation":"

Sets the attributes of the platform application object for the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging). For more information, see Using Amazon SNS Mobile Push Notifications. For information on configuring attributes for message delivery status, see Using Amazon SNS Application Attributes for Message Delivery Status.

" }, "SetSMSAttributes":{ "name":"SetSMSAttributes", @@ -565,7 +565,7 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidSecurityException"} ], - "documentation":"

Prepares to subscribe an endpoint by sending the endpoint a confirmation message. To actually create a subscription, the endpoint owner must call the ConfirmSubscription action with the token from the confirmation message. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

" + "documentation":"

Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S or email, or if the endpoint and the topic are not in the same AWS account, the endpoint owner must run the ConfirmSubscription action to confirm the subscription.

You call the ConfirmSubscription action with the token from the subscription response. Confirmation tokens are valid for three days.

This action is throttled at 100 transactions per second (TPS).

" }, "TagResource":{ "name":"TagResource", @@ -769,7 +769,7 @@ }, "Platform":{ "shape":"String", - "documentation":"

The following platforms are supported: ADM (Amazon Device Messaging), APNS (Apple Push Notification Service), APNS_SANDBOX, and FCM (Firebase Cloud Messaging).

" + "documentation":"

The following platforms are supported: ADM (Amazon Device Messaging), APNS (Apple Push Notification Service), APNS_SANDBOX, and GCM (Firebase Cloud Messaging).

" }, "Attributes":{ "shape":"MapStringToString", @@ -801,7 +801,7 @@ }, "Token":{ "shape":"String", - "documentation":"

Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being used. For example, when using APNS as the notification service, you need the device token. Alternatively, when using FCM or ADM, the device token equivalent is called the registration ID.

" + "documentation":"

Unique identifier created by the notification service for an app on a device. The specific name for Token will vary, depending on which notification service is being used. For example, when using APNS as the notification service, you need the device token. Alternatively, when using GCM (Firebase Cloud Messaging) or ADM, the device token equivalent is called the registration ID.

" }, "CustomUserData":{ "shape":"String", @@ -820,11 +820,11 @@ "members":{ "Name":{ "shape":"topicName", - "documentation":"

The name of the topic you want to create.

Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

" + "documentation":"

The name of the topic you want to create.

Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.

For a FIFO (first-in-first-out) topic, the name must end with the .fifo suffix.

" }, "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • FifoTopic – Set to true to create a FIFO topic.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

The following attribute applies only to FIFO topics:

  • ContentBasedDeduplication – Enables content-based deduplication. Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If the topic has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

" }, "Tags":{ "shape":"TagList", @@ -1000,7 +1000,7 @@ "members":{ "Attributes":{ "shape":"SubscriptionAttributesMap", - "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • ConfirmationWasAuthenticatedtrue if the subscription confirmation request was authenticated.

  • DeliveryPolicy – The JSON serialization of the subscription's delivery policy.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults.

  • FilterPolicy – The filter policy JSON that is assigned to the subscription.

  • Owner – The AWS account ID of the subscription's owner.

  • PendingConfirmationtrue if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token.

  • RawMessageDeliverytrue if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

  • SubscriptionArn – The subscription's ARN.

  • TopicArn – The topic ARN that the subscription is associated with.

" + "documentation":"

A map of the subscription's attributes. Attributes in this map include the following:

  • ConfirmationWasAuthenticated – true if the subscription confirmation request was authenticated.

  • DeliveryPolicy – The JSON serialization of the subscription's delivery policy.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults.

  • FilterPolicy – The filter policy JSON that is assigned to the subscription. For more information, see Amazon SNS Message Filtering in the Amazon SNS Developer Guide.

  • Owner – The AWS account ID of the subscription's owner.

  • PendingConfirmation – true if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token.

  • RawMessageDelivery – true if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints.

  • RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.

  • SubscriptionArn – The subscription's ARN.

  • TopicArn – The topic ARN that the subscription is associated with.

" } }, "documentation":"

Response for GetSubscriptionAttributes action.

" @@ -1021,7 +1021,7 @@ "members":{ "Attributes":{ "shape":"TopicAttributesMap", - "documentation":"

A map of the topic's attributes. Attributes in this map include the following:

  • DeliveryPolicy – The JSON serialization of the topic's delivery policy.

  • DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.

  • Owner – The AWS account ID of the topic's owner.

  • Policy – The JSON serialization of the topic's access control policy.

  • SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.

  • SubscriptionsDeleted – The number of deleted subscriptions for the topic.

  • SubscriptionsPending – The number of subscriptions pending confirmation for the topic.

  • TopicArn – The topic's ARN.

  • EffectiveDeliveryPolicy – Yhe JSON serialization of the effective delivery policy, taking system defaults into account.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of the topic's attributes. Attributes in this map include the following:

  • DeliveryPolicy – The JSON serialization of the topic's delivery policy.

  • DisplayName – The human-readable name used in the From field for notifications to email and email-json endpoints.

  • Owner – The AWS account ID of the topic's owner.

  • Policy – The JSON serialization of the topic's access control policy.

  • SubscriptionsConfirmed – The number of confirmed subscriptions for the topic.

  • SubscriptionsDeleted – The number of deleted subscriptions for the topic.

  • SubscriptionsPending – The number of subscriptions pending confirmation for the topic.

  • TopicArn – The topic's ARN.

  • EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy, taking system defaults into account.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" } }, "documentation":"

Response for GetTopicAttributes action.

" @@ -1550,7 +1550,7 @@ }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

A map of the platform application attributes. Attributes in this map include the following:

  • PlatformCredential – The credential received from the notification service. For APNS/APNS_SANDBOX, PlatformCredential is private key. For FCM, PlatformCredential is \"API key\". For ADM, PlatformCredential is \"client secret\".

  • PlatformPrincipal – The principal received from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. For FCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is \"client id\".

  • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications should be sent.

  • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications should be sent.

  • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications should be sent.

  • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications should be sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

  • SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

  • FailureFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

  • SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully delivered messages.

" + "documentation":"

A map of the platform application attributes. Attributes in this map include the following:

  • PlatformCredential – The credential received from the notification service. For APNS and APNS_SANDBOX, PlatformCredential is private key. For GCM (Firebase Cloud Messaging), PlatformCredential is API key. For ADM, PlatformCredential is client secret.

  • PlatformPrincipal – The principal received from the notification service. For APNS and APNS_SANDBOX, PlatformPrincipal is SSL certificate. For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal. For ADM, PlatformPrincipal is client id.

  • EventEndpointCreated – Topic ARN to which EndpointCreated event notifications are sent.

  • EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications are sent.

  • EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications are sent.

  • EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications are sent upon Direct Publish delivery failure (permanent) to one of the application's endpoints.

  • SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

  • FailureFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to use CloudWatch Logs on your behalf.

  • SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully delivered messages.

" } }, "documentation":"

Input for SetPlatformApplicationAttributes action.

" @@ -1607,7 +1607,7 @@ }, "AttributeName":{ "shape":"attributeName", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

" + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses:

  • DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints.

  • DisplayName – The display name to use for a topic with SMS subscriptions.

  • Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic.

The following attribute applies only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the AWS Key Management Service API Reference.

The following attribute applies only to FIFO topics:

  • ContentBasedDeduplication – Enables content-based deduplication. Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If the topic has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

" }, "AttributeValue":{ "shape":"attributeValue", @@ -1647,7 +1647,7 @@ }, "Endpoint":{ "shape":"endpoint", - "documentation":"

The endpoint that you want to receive notifications. Endpoints vary by protocol:

  • For the http protocol, the endpoint is an URL beginning with http://

  • For the https protocol, the endpoint is a URL beginning with https://

  • For the email protocol, the endpoint is an email address

  • For the email-json protocol, the endpoint is an email address

  • For the sms protocol, the endpoint is a phone number of an SMS-enabled device

  • For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue

  • For the application protocol, the endpoint is the EndpointArn of a mobile app and device.

  • For the lambda protocol, the endpoint is the ARN of an Amazon Lambda function.

" + "documentation":"

The endpoint that you want to receive notifications. Endpoints vary by protocol:

  • For the http protocol, the (public) endpoint is a URL beginning with http://

  • For the https protocol, the (public) endpoint is a URL beginning with https://

  • For the email protocol, the endpoint is an email address

  • For the email-json protocol, the endpoint is an email address

  • For the sms protocol, the endpoint is a phone number of an SMS-enabled device

  • For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue

  • For the application protocol, the endpoint is the EndpointArn of a mobile app and device.

  • For the lambda protocol, the endpoint is the ARN of an AWS Lambda function.

" }, "Attributes":{ "shape":"SubscriptionAttributesMap", @@ -1655,7 +1655,7 @@ }, "ReturnSubscriptionArn":{ "shape":"boolean", - "documentation":"

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

  • If you have the subscription ARN returned, the response includes the ARN in all cases, even if the subscription is not yet confirmed.

  • If you don't have the subscription ARN returned, in addition to the ARN for confirmed subscriptions, the response also includes the pending subscription ARN value for subscriptions that aren't yet confirmed. A subscription becomes confirmed when the subscriber calls the ConfirmSubscription action with a confirmation token.

If you set this parameter to true, .

The default value is false.

" + "documentation":"

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

  • If you set this parameter to true, the response includes the ARN in all cases, even if the subscription is not yet confirmed. In addition to the ARN for confirmed subscriptions, the response also includes the pending subscription ARN value for subscriptions that aren't yet confirmed. A subscription becomes confirmed when the subscriber calls the ConfirmSubscription action with a confirmation token.

The default value is false.

" } }, "documentation":"

Input for Subscribe action.

" diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 783cb3283870..4ed5eda03815 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/sqs/src/main/resources/codegen-resources/paginators-1.json b/services/sqs/src/main/resources/codegen-resources/paginators-1.json index 4d5fe76b9718..7cae474a5724 100644 --- a/services/sqs/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sqs/src/main/resources/codegen-resources/paginators-1.json @@ -1,6 +1,15 @@ { "pagination": { + "ListDeadLetterSourceQueues": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "queueUrls" + }, "ListQueues": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", "result_key": "QueueUrls" } } diff --git a/services/sqs/src/main/resources/codegen-resources/service-2.json b/services/sqs/src/main/resources/codegen-resources/service-2.json index 2a50ef13339d..54045ae260ae 100755 --- a/services/sqs/src/main/resources/codegen-resources/service-2.json +++ b/services/sqs/src/main/resources/codegen-resources/service-2.json @@ -22,7 +22,7 @@ "errors":[ {"shape":"OverLimit"} ], - "documentation":"

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

  • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

  • An Amazon SQS policy can have a maximum of 7 actions.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Adds a permission to a queue for a specific principal. This allows sharing access to the queue.

When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon Simple Queue Service Developer Guide.

  • AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon Simple Queue Service Developer Guide.

  • An Amazon SQS policy can have a maximum of 7 actions.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "ChangeMessageVisibility":{ "name":"ChangeMessageVisibility", @@ -54,7 +54,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" + "documentation":"

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

" }, "CreateQueue":{ "name":"CreateQueue", @@ -71,7 +71,7 @@ {"shape":"QueueDeletedRecently"}, {"shape":"QueueNameExists"} ], - "documentation":"

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:

  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue.

    You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon Simple Queue Service Developer Guide.

  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.

  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.

After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.

To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:

  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.

  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "DeleteMessage":{ "name":"DeleteMessage", @@ -103,7 +103,7 @@ {"shape":"BatchEntryIdsNotDistinct"}, {"shape":"InvalidBatchEntryId"} ], - "documentation":"

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" + "documentation":"

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.

Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

" }, "DeleteQueue":{ "name":"DeleteQueue", @@ -112,7 +112,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteQueueRequest"}, - "documentation":"

Deletes the queue specified by the QueueUrl, regardless of the queue's contents. If the specified queue doesn't exist, Amazon SQS returns a successful response.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

Deletes the queue specified by the QueueUrl, regardless of the queue's contents.

Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.

When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.

When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.

Cross-account permissions don't apply to this action. For more information, see Grant Cross-Account Permissions to a Role and a User Name in the Amazon Simple Queue Service Developer Guide.

" }, "GetQueueAttributes":{ "name":"GetQueueAttributes", @@ -128,7 +128,7 @@ "errors":[ {"shape":"InvalidAttributeName"} ], - "documentation":"

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" + "documentation":"

Gets attributes for the specified queue.

To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

" }, "GetQueueUrl":{ "name":"GetQueueUrl", @@ -262,7 +262,7 @@ {"shape":"InvalidBatchEntryId"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&Attribute.1=first

&Attribute.2=second

" + "documentation":"

Delivers up to ten messages to the specified queue. This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.

The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:

#x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF

Any characters not included in this list will be rejected. For more information, see the W3C specification for characters.

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.

Some actions take lists of parameters. These lists are specified using the param.n notation. Values of n are integers starting from 1. For example, a parameter list with two elements looks like this:

&AttributeName.1=first

&AttributeName.2=second

" }, "SetQueueAttributes":{ "name":"SetQueueAttributes", @@ -386,7 +386,7 @@ }, "SenderFault":{ "shape":"Boolean", - "documentation":"

Specifies whether the error happened due to the producer.

" + "documentation":"

Specifies whether the error happened due to the caller of the batch API action.

" }, "Code":{ "shape":"String", @@ -416,6 +416,10 @@ } }, "Boolean":{"type":"boolean"}, + "BoxedInteger":{ + "type":"integer", + "box":true + }, "ChangeMessageVisibilityBatchRequest":{ "type":"structure", "required":[ @@ -443,7 +447,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

An identifier for this particular receipt handle used to communicate the result.

The Ids of a batch request need to be unique within a request

" + "documentation":"

An identifier for this particular receipt handle used to communicate the result.

The Ids of a batch request need to be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).

" }, "ReceiptHandle":{ "shape":"String", @@ -533,7 +537,7 @@ }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Designates a queue as FIFO. Valid values: true, false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

    For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

  • ContentBasedDeduplication - Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", + "documentation":"

A map of attributes with their corresponding values.

The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:

  • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.

  • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days).

  • Policy – The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

  • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue – Designates a queue as FIFO. Valid values: true, false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly.

    For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

  • ContentBasedDeduplication – Enables content-based deduplication. Valid values: true, false. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", "locationName":"Attribute" }, "tags":{ @@ -581,7 +585,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

An identifier for this particular receipt handle. This is used to communicate the result.

The Ids of a batch request need to be unique within a request

" + "documentation":"

An identifier for this particular receipt handle. This is used to communicate the result.

The Ids of a batch request need to be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).

" }, "ReceiptHandle":{ "shape":"String", @@ -686,7 +690,7 @@ }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

A list of attributes for which to retrieve information.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

  • All - Returns all values.

  • ApproximateNumberOfMessages - Returns the approximate number of messages available for retrieval from the queue.

  • ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.

  • ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.

  • CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time).

  • DelaySeconds - Returns the default delay on the queue in seconds.

  • LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time).

  • MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

  • MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message.

  • Policy - Returns the policy of the queue.

  • QueueArn - Returns the Amazon resource name (ARN) of the queue.

  • ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

  • RedrivePolicy - Returns the string that includes the parameters for dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

  • VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

  • KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

    To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

  • ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

A list of attributes for which to retrieve information.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

The following attributes are supported:

The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessagesVisible metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.

  • All – Returns all values.

  • ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue.

  • ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.

  • ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.

  • CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time).

  • DelaySeconds – Returns the default delay on the queue in seconds.

  • LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time).

  • MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.

  • MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message.

  • Policy – Returns the policy of the queue.

  • QueueArn – Returns the Amazon resource name (ARN) of the queue.

  • ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.

  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

  • VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId – Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.

  • KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. For more information, see How Does the Data Key Reuse Period Work?.

The following attributes apply only to FIFO (first-in-first-out) queues:

  • FifoQueue – Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon Simple Queue Service Developer Guide.

    To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.

  • ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

" } }, "documentation":"

" @@ -768,6 +772,14 @@ "QueueUrl":{ "shape":"String", "documentation":"

The URL of a dead-letter queue.

Queue URLs and names are case-sensitive.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token to request the next set of results.

" + }, + "MaxResults":{ + "shape":"BoxedInteger", + "documentation":"

Maximum number of results to include in the response.

" } }, "documentation":"

" @@ -779,6 +791,10 @@ "queueUrls":{ "shape":"QueueUrlList", "documentation":"

A list of source queue URLs that have the RedrivePolicy queue attribute configured with a dead-letter queue.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token to include in the next request.

" } }, "documentation":"

A list of your dead letter source queues.

" @@ -809,6 +825,14 @@ "QueueNamePrefix":{ "shape":"String", "documentation":"

A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.

Queue URLs and names are case-sensitive.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token to request the next set of results.

" + }, + "MaxResults":{ + "shape":"BoxedInteger", + "documentation":"

Maximum number of results to include in the response.

" } }, "documentation":"

" @@ -818,7 +842,11 @@ "members":{ "QueueUrls":{ "shape":"QueueUrlList", - "documentation":"

A list of queue URLs, up to 1,000 entries.

" + "documentation":"

A list of queue URLs, up to 1,000 entries, or the value of MaxResults that you sent in the request.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

Pagination token to include in the next request.

" } }, "documentation":"

A list of your queues.

" @@ -1136,7 +1164,7 @@ }, "AttributeNames":{ "shape":"AttributeNameList", - "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

  • All - Returns all values.

  • ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.

  • AWSTraceHeader - Returns the AWS X-Ray trace header string.

  • SenderId

    • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.

    • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).

  • MessageDeduplicationId - Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId - Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber - Returns the value provided by Amazon SQS.

" + "documentation":"

A list of attributes that need to be returned along with each message. These attributes include:

  • All – Returns all values.

  • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

  • AWSTraceHeader – Returns the AWS X-Ray trace header string.

  • SenderId

    • For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.

    • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

  • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber – Returns the value provided by Amazon SQS.

" }, "MessageAttributeNames":{ "shape":"MessageAttributeNameList", @@ -1152,11 +1180,11 @@ }, "WaitTimeSeconds":{ "shape":"Integer", - "documentation":"

The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call returns successfully with an empty list of messages.

" + "documentation":"

The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call returns successfully with an empty list of messages.

To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients.

" }, "ReceiveRequestAttemptId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

  • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.

  • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.

  • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId.

  • You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes).

  • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

    If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error.

    To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.

  • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.

  • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.

The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.

  • You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.

  • When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.

  • If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId.

  • It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed).

  • During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

    If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error.

    To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.

  • While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.

  • If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.

The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide.

" } }, "documentation":"

" @@ -1216,7 +1244,7 @@ "members":{ "Id":{ "shape":"String", - "documentation":"

An identifier for a message in this batch used to communicate the result.

The Ids of a batch request need to be unique within a request

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).

" + "documentation":"

An identifier for a message in this batch used to communicate the result.

The Ids of a batch request need to be unique within a request.

This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_).

" }, "MessageBody":{ "shape":"String", @@ -1233,7 +1261,7 @@ }, "MessageSystemAttributes":{ "shape":"MessageBodySystemAttributeMap", - "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace string.

  • The size of a message system attribute doesn't count towards the total size of a message.

", + "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace header string.

  • The size of a message system attribute doesn't count towards the total size of a message.

", "locationName":"MessageSystemAttribute" }, "MessageDeduplicationId":{ @@ -1342,12 +1370,12 @@ }, "MessageSystemAttributes":{ "shape":"MessageBodySystemAttributeMap", - "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace string.

  • The size of a message system attribute doesn't count towards the total size of a message.

", + "documentation":"

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted AWS X-Ray trace header string.

  • The size of a message system attribute doesn't count towards the total size of a message.

", "locationName":"MessageSystemAttribute" }, "MessageDeduplicationId":{ "shape":"String", - "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" + "documentation":"

This parameter applies only to FIFO (first-in-first-out) queues.

The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

  • Every message must have a unique MessageDeduplicationId,

    • You may provide a MessageDeduplicationId explicitly.

    • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

    • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

    • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

  • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

  • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues).

If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide.

" }, "MessageGroupId":{ "shape":"String", @@ -1395,7 +1423,7 @@ }, "Attributes":{ "shape":"QueueAttributeMap", - "documentation":"

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

  • MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

  • Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy - The string that includes the parameters for the dead-letter queue functionality of the source queue. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount - The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout - The visibility timeout for the queue, in seconds. Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", + "documentation":"

A map of attributes to set.

The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:

  • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0.

  • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

  • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).

  • Policy – The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide.

  • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. For more information about the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter Queues in the Amazon Simple Queue Service Developer Guide.

    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter queue.

    The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.

  • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide.

The following attributes apply only to server-side-encryption:

  • KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the AWS Key Management Service API Reference.

  • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?.

The following attribute applies only to FIFO (first-in-first-out) queues:

  • ContentBasedDeduplication – Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon Simple Queue Service Developer Guide.

    • Every message must have a unique MessageDeduplicationId,

      • You may provide a MessageDeduplicationId explicitly.

      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).

      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.

      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.

    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.

    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.

", "locationName":"Attribute" } }, @@ -1449,6 +1477,7 @@ } }, "TagValue":{"type":"string"}, + "Token":{"type":"string"}, "TooManyEntriesInBatchRequest":{ "type":"structure", "members":{ diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 8750be8b53ae..8d820e80cca6 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/service-2.json b/services/ssm/src/main/resources/codegen-resources/service-2.json index 0415aaf95f6b..df83a30f5f91 100644 --- a/services/ssm/src/main/resources/codegen-resources/service-2.json +++ b/services/ssm/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"TooManyTagsError"}, {"shape":"TooManyUpdates"} ], - "documentation":"

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.

Each resource can have a maximum of 50 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and are interpreted strictly as a string of characters.

For more information about tags, see Tagging Your Amazon EC2 Resources in the Amazon EC2 User Guide.

" + "documentation":"

Adds or overwrites one or more tags for the specified resource. Tags are metadata that you can assign to your documents, managed instances, maintenance windows, Parameter Store parameters, and patch baselines. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. For example, you could define a set of tags for your account's managed instances that helps you track each instance's owner and stack level. For example: Key=Owner and Value=DbAdmin, SysAdmin, or Dev. Or Key=Stack and Value=Production, Pre-Production, or Test.

Each resource can have a maximum of 50 tags.

We recommend that you devise a set of tag keys that meets your needs for each resource type. Using a consistent set of tag keys makes it easier for you to manage your resources. You can search and filter the resources based on the tags you add. Tags don't have any semantic meaning and are interpreted strictly as a string of characters.

For more information about using tags with EC2 instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Guide.

" }, "CancelCommand":{ "name":"CancelCommand", @@ -71,7 +71,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting Up AWS Systems Manager for Hybrid Environments in the AWS Systems Manager User Guide.

On-premises servers or VMs that are registered with Systems Manager and Amazon EC2 instances that you manage with Systems Manager are all called managed instances.

" + "documentation":"

Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting up AWS Systems Manager for hybrid environments in the AWS Systems Manager User Guide.

On-premises servers or VMs that are registered with Systems Manager and EC2 instances that you manage with Systems Manager are all called managed instances.

" }, "CreateAssociation":{ "name":"CreateAssociation", @@ -94,7 +94,7 @@ {"shape":"InvalidTarget"}, {"shape":"InvalidSchedule"} ], - "documentation":"

Associates the specified Systems Manager document with the specified instances or targets.

When you associate a document with one or more instances using instance IDs or tags, SSM Agent running on the instance processes the document and configures the instance as specified.

If you associate a document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception.

" + "documentation":"

A State Manager association defines the state that you want to maintain on your instances. For example, an association can specify that anti-virus software must be installed and running on your instances, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an AWS Resource Group or an AWS Autoscaling Group, State Manager applies the configuration when new instances are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software is not installed, then State Manager installs it. If the software is installed, but the service is not running, then the association might instruct State Manager to start the service.

" }, "CreateAssociationBatch":{ "name":"CreateAssociationBatch", @@ -135,7 +135,7 @@ {"shape":"DocumentLimitExceeded"}, {"shape":"InvalidDocumentSchemaVersion"} ], - "documentation":"

Creates a Systems Manager document.

After you create a document, you can use CreateAssociation to associate it with one or more running instances.

" + "documentation":"

Creates a Systems Manager (SSM) document. An SSM document defines the actions that Systems Manager performs on your managed instances. For more information about SSM documents, including information about supported schemas, features, and syntax, see AWS Systems Manager Documents in the AWS Systems Manager User Guide.

" }, "CreateMaintenanceWindow":{ "name":"CreateMaintenanceWindow", @@ -166,7 +166,7 @@ {"shape":"OpsItemLimitExceededException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" }, "CreatePatchBaseline":{ "name":"CreatePatchBaseline", @@ -197,7 +197,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single Amazon S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization by using AWS Organizations. For more information, see Setting Up Explorer to Display Data from Multiple Accounts and Regions in the AWS Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

" + "documentation":"

A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single S3 bucket. This type can synchronize OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization by using AWS Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the AWS Systems Manager User Guide.

A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

" }, "DeleteActivation":{ "name":"DeleteActivation", @@ -591,7 +591,7 @@ {"shape":"InvalidInstanceInformationFilterValue"}, {"shape":"InvalidFilterKey"} ], - "documentation":"

Describes one or more of your instances. You can use this to get information about instances like the operating system platform, the SSM Agent version (Linux), status etc. If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

The IamRole field for this API action is the Amazon Identity and Access Management (IAM) role assigned to on-premises instances. This call does not return the IAM role for Amazon EC2 instances.

" + "documentation":"

Describes one or more of your instances, including information about the operating system platform, the version of SSM Agent installed on the instance, instance status, and so on.

If you specify one or more instance IDs, it returns information for those instances. If you do not specify instance IDs, it returns information for all your instances. If you specify an instance ID that is not valid or an instance that you do not own, you receive an error.

The IamRole field for this API action is the AWS Identity and Access Management (IAM) role assigned to on-premises instances. This call does not return the IAM role for EC2 instances.

" }, "DescribeInstancePatchStates":{ "name":"DescribeInstancePatchStates", @@ -773,7 +773,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" }, "DescribeParameters":{ "name":"DescribeParameters", @@ -918,7 +918,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the Session Manager connection status for an instance to determine whether it is connected and ready to receive Session Manager connections.

" + "documentation":"

Retrieves the Session Manager connection status for an instance to determine whether it is running and ready to receive Session Manager connections.

" }, "GetDefaultPatchBaseline":{ "name":"GetDefaultPatchBaseline", @@ -995,7 +995,7 @@ {"shape":"InvalidTypeNameException"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type.

" + "documentation":"

Return a list of inventory type names for the account, or return a list of attribute names for a specific Inventory item type.

" }, "GetMaintenanceWindow":{ "name":"GetMaintenanceWindow", @@ -1079,7 +1079,7 @@ {"shape":"InternalServerError"}, {"shape":"OpsItemNotFoundException"} ], - "documentation":"

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Get information about an OpsItem by using the ID. You must have permission in AWS Identity and Access Management (IAM) to view information about an OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" }, "GetOpsSummary":{ "name":"GetOpsSummary", @@ -1300,7 +1300,7 @@ {"shape":"InvalidFilter"}, {"shape":"InvalidNextToken"} ], - "documentation":"

For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.

" + "documentation":"

For a specified resource ID, this API action returns a list of compliance statuses for different resource types. Currently, you can only specify one resource ID per call. List results depend on the criteria specified in the filter.

" }, "ListComplianceSummaries":{ "name":"ListComplianceSummaries", @@ -1315,7 +1315,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify.

" + "documentation":"

Returns a summary count of compliant and non-compliant resources for a compliance type. For example, this call can return State Manager associations, patches, or custom compliance types according to the filter criteria that you specify.

" }, "ListDocumentVersions":{ "name":"ListDocumentVersions", @@ -1691,7 +1691,7 @@ {"shape":"TargetNotConnected"}, {"shape":"InternalServerError"} ], - "documentation":"

Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.

AWS CLI usage: start-session is an interactive command that requires the Session Manager plugin to be installed on the client machine making the call. For information, see Install the Session Manager Plugin for the AWS CLI in the AWS Systems Manager User Guide.

AWS Tools for PowerShell usage: Start-SSMSession is not currently supported by AWS Tools for PowerShell on Windows local machines.

" + "documentation":"

Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a URL and token that can be used to open a WebSocket connection for sending input and receiving outputs.

AWS CLI usage: start-session is an interactive command that requires the Session Manager plugin to be installed on the client machine making the call. For information, see Install the Session Manager plugin for the AWS CLI in the AWS Systems Manager User Guide.

AWS Tools for PowerShell usage: Start-SSMSession is not currently supported by AWS Tools for PowerShell on Windows local machines.

" }, "StopAutomationExecution":{ "name":"StopAutomationExecution", @@ -1856,7 +1856,7 @@ {"shape":"InvalidInstanceId"}, {"shape":"InternalServerError"} ], - "documentation":"

Assigns or changes an Amazon Identity and Access Management (IAM) role for the managed instance.

" + "documentation":"

Changes the AWS Identity and Access Management (IAM) role that is assigned to the on-premises instance or virtual machine (VM). IAM roles are first assigned to these hybrid instances during the activation process. For more information, see CreateActivation.

" }, "UpdateOpsItem":{ "name":"UpdateOpsItem", @@ -1873,7 +1873,7 @@ {"shape":"OpsItemLimitExceededException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Edit or change an OpsItem. You must have permission in AWS Identity and Access Management (IAM) to update an OpsItem. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" }, "UpdatePatchBaseline":{ "name":"UpdatePatchBaseline", @@ -1903,7 +1903,7 @@ {"shape":"ResourceDataSyncConflictException"}, {"shape":"InternalServerError"} ], - "documentation":"

Update a resource data sync. After you create a resource data sync for a Region, you can't change the account options for that sync. For example, if you create a sync in the us-east-2 (Ohio) Region and you choose the Include only the current account option, you can't edit that sync later and choose the Include all accounts from my AWS Organizations configuration option. Instead, you must delete the first resource data sync, and create a new one.

" + "documentation":"

Update a resource data sync. After you create a resource data sync for a Region, you can't change the account options for that sync. For example, if you create a sync in the us-east-2 (Ohio) Region and you choose the Include only the current account option, you can't edit that sync later and choose the Include all accounts from my AWS Organizations configuration option. Instead, you must delete the first resource data sync, and create a new one.

This API action only supports a resource data sync that was created with a SyncFromSource SyncType.

" }, "UpdateServiceSetting":{ "name":"UpdateServiceSetting", @@ -2066,6 +2066,7 @@ "documentation":"

Error returned if an attempt is made to register a patch group with a patch baseline that is already registered with a different patch baseline.

", "exception":true }, + "ApplyOnlyAtCronInterval":{"type":"boolean"}, "ApproveAfterDays":{ "type":"integer", "max":100, @@ -2198,7 +2199,7 @@ }, "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", - "documentation":"

An Amazon S3 bucket where you want to store the output details of the request.

" + "documentation":"

An S3 bucket where you want to store the output details of the request.

" }, "LastExecutionDate":{ "shape":"DateTime", @@ -2223,6 +2224,14 @@ "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", "documentation":"

The severity level that is assigned to the association.

" + }, + "SyncCompliance":{ + "shape":"AssociationSyncCompliance", + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Describes the parameters for a document.

" @@ -2441,7 +2450,8 @@ "AssociationStatusName", "LastExecutedBefore", "LastExecutedAfter", - "AssociationName" + "AssociationName", + "ResourceGroupName" ] }, "AssociationFilterList":{ @@ -2554,6 +2564,13 @@ "Failed" ] }, + "AssociationSyncCompliance":{ + "type":"string", + "enum":[ + "AUTO", + "MANUAL" + ] + }, "AssociationVersion":{ "type":"string", "pattern":"([$]LATEST)|([1-9][0-9]*)" @@ -2612,6 +2629,14 @@ "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", "documentation":"

The severity level that is assigned to the association.

" + }, + "SyncCompliance":{ + "shape":"AssociationSyncCompliance", + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Information about the association version.

" @@ -2965,7 +2990,7 @@ }, "LogFile":{ "shape":"String", - "documentation":"

An Amazon S3 bucket where execution information is stored.

" + "documentation":"

An S3 bucket where execution information is stored.

" }, "Outputs":{ "shape":"AutomationParameterMap", @@ -3021,7 +3046,7 @@ }, "AutomationType":{ "shape":"AutomationType", - "documentation":"

Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple AWS Regions and accounts. For more information, see Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide.

" + "documentation":"

Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple AWS Regions and accounts. For more information, see Running Automation workflows in multiple AWS Regions and accounts in the AWS Systems Manager User Guide.

" } }, "documentation":"

Details about a specific Automation execution.

" @@ -3235,11 +3260,11 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to any instances.

  • In Progress: The command has been sent to at least one instance but has not reached a final state on all instances.

  • Success: The command successfully ran on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations does not have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.

" + "documentation":"

A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to any instances.

  • In Progress: The command has been sent to at least one instance but has not reached a final state on all instances.

  • Success: The command successfully ran on all invocations. This is a terminal state.

  • Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.

  • Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.

  • Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.

  • Incomplete: The command was attempted on all instances and one or more invocations does not have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.

" }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -3251,11 +3276,11 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running Commands Using Systems Manager Run Command in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the AWS Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage or errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Running Commands Using Systems Manager Run Command in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of errors allowed before the system stops sending the command to additional targets. You can specify a number of errors, such as 10, or a percentage or errors, such as 10%. The default value is 0. For more information about how to use MaxErrors, see Running commands using Systems Manager Run Command in the AWS Systems Manager User Guide.

" }, "TargetCount":{ "shape":"TargetCount", @@ -3284,6 +3309,10 @@ "CloudWatchOutputConfig":{ "shape":"CloudWatchOutputConfig", "documentation":"

CloudWatch Logs information where you want Systems Manager to send the command output.

" + }, + "TimeoutSeconds":{ + "shape":"TimeoutSeconds", + "documentation":"

The TimeoutSeconds value specified for a command.

" } }, "documentation":"

Describes a command request.

" @@ -3345,7 +3374,7 @@ }, "InstanceName":{ "shape":"InstanceTagName", - "documentation":"

The name of the invocation target. For Amazon EC2 instances this is the value for the aws:Name tag. For on-premises instances, this is the name of the instance.

" + "documentation":"

The name of the invocation target. For EC2 instances this is the value for the aws:Name tag. For on-premises instances, this is the name of the instance.

" }, "Comment":{ "shape":"Comment", @@ -3369,7 +3398,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "TraceOutput":{ "shape":"InvocationTraceOutput", @@ -3377,11 +3406,11 @@ }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL to the plugin's StdOut file in Amazon S3, if the Amazon S3 bucket was defined for the parent command. For an invocation, StandardOutputUrl is populated if there is just one plugin defined for the command, and the Amazon S3 bucket was defined for the command.

" + "documentation":"

The URL to the plugin's StdOut file in Amazon S3, if the S3 bucket was defined for the parent command. For an invocation, StandardOutputUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" }, "StandardErrorUrl":{ "shape":"Url", - "documentation":"

The URL to the plugin's StdErr file in Amazon S3, if the Amazon S3 bucket was defined for the parent command. For an invocation, StandardErrorUrl is populated if there is just one plugin defined for the command, and the Amazon S3 bucket was defined for the command.

" + "documentation":"

The URL to the plugin's StdErr file in Amazon S3, if the S3 bucket was defined for the parent command. For an invocation, StandardErrorUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command.

" }, "CommandPlugins":{"shape":"CommandPluginList"}, "ServiceRole":{ @@ -3438,7 +3467,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Success: The execution of the command or plugin was successfully completed. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: Command execution started on the instance, but the execution was not complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command was not successful on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "ResponseCode":{ "shape":"ResponseCode", @@ -3458,7 +3487,7 @@ }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If the Amazon S3 bucket for the command was not specified, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If the S3 bucket for the command was not specified, then this string is empty.

" }, "StandardErrorUrl":{ "shape":"Url", @@ -3466,15 +3495,15 @@ }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the S3 bucket region.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", - "documentation":"

The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript

test_folder is the name of the Amazon S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-1234567876543 is the instance ID;

awsrunShellScript is the name of the plugin.

" + "documentation":"

The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript

test_folder is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-1234567876543 is the instance ID;

awsrunShellScript is the name of the plugin.

" }, "OutputS3KeyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript

test_folder is the name of the Amazon S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-1234567876543 is the instance ID;

awsrunShellScript is the name of the plugin.

" + "documentation":"

The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:

test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript

test_folder is the name of the S3 bucket;

ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

i-1234567876543 is the instance ID;

awsrunShellScript is the name of the plugin.

" } }, "documentation":"

Describes plugin details.

" @@ -3587,7 +3616,7 @@ "documentation":"

A \"Key\": \"Value\" tag combination for the compliance item.

" } }, - "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch resource type, Items includes information about the PatchSeverity, Classification, etc.

" + "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch resource type, Items includes information about the PatchSeverity, Classification, and so on.

" }, "ComplianceItemContentHash":{ "type":"string", @@ -3761,6 +3790,13 @@ "min":1, "pattern":"[A-Za-z0-9_\\-]\\w+|Custom:[a-zA-Z0-9_\\-]\\w+" }, + "ComplianceUploadType":{ + "type":"string", + "enum":[ + "COMPLETE", + "PARTIAL" + ] + }, "CompliantSummary":{ "type":"structure", "members":{ @@ -3802,7 +3838,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM Service Role for a Hybrid Environment in the AWS Systems Manager User Guide.

" + "documentation":"

The AWS Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the AWS Systems Manager User Guide.

" }, "RegistrationLimit":{ "shape":"RegistrationLimit", @@ -3881,7 +3917,7 @@ }, "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", - "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

" }, "AssociationName":{ "shape":"AssociationName", @@ -3898,6 +3934,14 @@ "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", "documentation":"

The severity level to assign to the association.

" + }, + "SyncCompliance":{ + "shape":"AssociationSyncCompliance", + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } }, "documentation":"

Describes the association of a Systems Manager SSM document and an instance.

" @@ -3937,7 +3981,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets (either instances or tags) for the association. You must specify a value for Targets if you don't specify a value for InstanceId.

" + "documentation":"

The targets for the association. You can target instances by using tags, AWS Resource Groups, all instances in an AWS account, or individual instance IDs. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the AWS Systems Manager User Guide.

" }, "ScheduleExpression":{ "shape":"ScheduleExpression", @@ -3945,7 +3989,7 @@ }, "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", - "documentation":"

An Amazon S3 bucket where you want to store the output details of the request.

" + "documentation":"

An S3 bucket where you want to store the output details of the request.

" }, "AssociationName":{ "shape":"AssociationName", @@ -3966,6 +4010,14 @@ "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", "documentation":"

The severity level to assign to the association.

" + }, + "SyncCompliance":{ + "shape":"AssociationSyncCompliance", + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you create it.

" } } }, @@ -3987,11 +4039,11 @@ "members":{ "Content":{ "shape":"DocumentContent", - "documentation":"

A valid JSON or YAML string.

" + "documentation":"

The content for the new SSM document in JSON or YAML format. We recommend storing the contents for your new document in an external JSON or YAML file and referencing the file in a command.

For examples, see the following topics in the AWS Systems Manager User Guide.

" }, "Requires":{ "shape":"DocumentRequiresList", - "documentation":"

A list of SSM documents required by a document. For example, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document.

" + "documentation":"

A list of SSM documents required by a document. This parameter is used exclusively by AWS AppConfig. When a user creates an AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document for validation purposes. For more information, see AWS AppConfig in the AWS Systems Manager User Guide.

" }, "Attachments":{ "shape":"AttachmentsSourceList", @@ -3999,7 +4051,7 @@ }, "Name":{ "shape":"DocumentName", - "documentation":"

A name for the Systems Manager document.

Do not use the following to begin the names of documents you create. They are reserved by AWS for use as document prefixes:

  • aws

  • amazon

  • amzn

" + "documentation":"

A name for the Systems Manager document.

You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:

  • aws-

  • amazon

  • amzn

" }, "VersionName":{ "shape":"DocumentVersionName", @@ -4015,7 +4067,7 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. For a list of valid resource types, see AWS Resource Types Reference in the AWS CloudFormation User Guide.

" + "documentation":"

Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: /AWS::EC2::Instance. If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", @@ -4066,6 +4118,11 @@ "shape":"MaintenanceWindowTimezone", "documentation":"

The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"etc/UTC\", or \"Asia/Seoul\". For more information, see the Time Zone Database on the IANA website.

" }, + "ScheduleOffset":{ + "shape":"MaintenanceWindowOffset", + "documentation":"

The number of days to wait after the date and time specified by a CRON expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run on the third Tuesday of every month at 11:30 PM.

cron(0 30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", + "box":true + }, "Duration":{ "shape":"MaintenanceWindowDurationHours", "documentation":"

The duration of the maintenance window in hours.

" @@ -4112,7 +4169,7 @@ }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.

" + "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" }, "Notifications":{ "shape":"OpsItemNotifications", @@ -4128,7 +4185,7 @@ }, "Source":{ "shape":"OpsItemSource", - "documentation":"

The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager.

" + "documentation":"

The origin of the OpsItem, such as Amazon EC2 or Systems Manager.

The source name can't contain the following strings: aws, amazon, and amzn.

" }, "Title":{ "shape":"OpsItemTitle", @@ -4136,7 +4193,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Optional metadata that you assign to a resource. You can restrict access to OpsItems by using an inline IAM policy that specifies tags. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

Tags use a key-value pair. For example:

Key=Department,Value=Finance

To add tags to an existing OpsItem, use the AddTagsToResource action.

" + "documentation":"

Optional metadata that you assign to a resource. You can restrict access to OpsItems by using an inline IAM policy that specifies tags. For more information, see Getting started with OpsCenter in the AWS Systems Manager User Guide.

Tags use a key-value pair. For example:

Key=Department,Value=Finance

To add tags to an existing OpsItem, use the AddTagsToResource action.

" }, "Category":{ "shape":"OpsItemCategory", @@ -4179,7 +4236,7 @@ }, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -4192,7 +4249,7 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" }, "RejectedPatchesAction":{ "shape":"PatchAction", @@ -4236,15 +4293,15 @@ }, "S3Destination":{ "shape":"ResourceDataSyncS3Destination", - "documentation":"

Amazon S3 configuration details for the sync.

" + "documentation":"

Amazon S3 configuration details for the sync. This parameter is required if the SyncType value is SyncToDestination.

" }, "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

Specify SyncToDestination to create a resource data sync that synchronizes data from multiple AWS Regions to an Amazon S3 bucket. Specify SyncFromSource to synchronize data from multiple AWS accounts and Regions, as listed in AWS Organizations.

" + "documentation":"

Specify SyncToDestination to create a resource data sync that synchronizes data to an S3 bucket for Inventory. If you specify SyncToDestination, you must provide a value for S3Destination. Specify SyncFromSource to synchronize data from a single account and multiple Regions, or multiple AWS accounts and Regions, as listed in AWS Organizations for Explorer. If you specify SyncFromSource, you must provide a value for SyncSource. The default value is SyncToDestination.

" }, "SyncSource":{ "shape":"ResourceDataSyncSource", - "documentation":"

Specify information about the data sources to synchronize.

" + "documentation":"

Specify information about the data sources to synchronize. This parameter is required if the SyncType value is SyncFromSource.

" } } }, @@ -4370,7 +4427,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

A summary of the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary in the AWS Systems Manager User Guide.

" + "documentation":"

A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the AWS Systems Manager User Guide.

" } } }, @@ -4875,7 +4932,7 @@ }, "AccountSharingInfoList":{ "shape":"AccountSharingInfoList", - "documentation":"

A list of of AWS accounts where the current document is shared and the version shared with each account.

" + "documentation":"

A list of AWS accounts where the current document is shared and the version shared with each account.

" } } }, @@ -5007,11 +5064,11 @@ "members":{ "InstanceInformationFilterList":{ "shape":"InstanceInformationFilterList", - "documentation":"

This is a legacy method. We recommend that you don't use this method. Instead, use the InstanceInformationFilter action. The InstanceInformationFilter action enables you to return instance information by using tags that are specified as a key-value mapping.

If you do use this method, then you can't use the InstanceInformationFilter action. Using this method and the InstanceInformationFilter action causes an exception error.

" + "documentation":"

This is a legacy method. We recommend that you don't use this method. Instead, use the Filters data type. Filters enables you to return instance information by filtering based on tags applied to managed instances.

Attempting to use InstanceInformationFilterList and Filters leads to an exception error.

" }, "Filters":{ "shape":"InstanceInformationStringFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of instances. You can filter on Amazon EC2 tag. Specify tags by using a key-value mapping.

" + "documentation":"

One or more filters. Use a filter to return a more specific list of instances. You can filter based on tags applied to EC2 instances. Use this Filters data type instead of InstanceInformationFilterList, which is deprecated.

" }, "MaxResults":{ "shape":"MaxResultsEC2Compatible", @@ -5477,7 +5534,7 @@ "members":{ "OpsItemFilters":{ "shape":"OpsItemFilters", - "documentation":"

One or more filters to limit the reponse.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title

    Operations: Contains

  • Key: OperationalData*

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

*If you filter the response by using the OperationalData operator, specify a key-value pair by using the following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"}

" + "documentation":"

One or more filters to limit the response.

  • Key: CreatedTime

    Operations: GreaterThan, LessThan

  • Key: LastModifiedBy

    Operations: Contains, Equals

  • Key: LastModifiedTime

    Operations: GreaterThan, LessThan

  • Key: Priority

    Operations: Equals

  • Key: Source

    Operations: Contains, Equals

  • Key: Status

    Operations: Equals

  • Key: Title

    Operations: Contains

  • Key: OperationalData*

    Operations: Equals

  • Key: OperationalDataKey

    Operations: Equals

  • Key: OperationalDataValue

    Operations: Equals, Contains

  • Key: OpsItemId

    Operations: Equals

  • Key: ResourceId

    Operations: Contains

  • Key: AutomationId

    Operations: Equals

*If you filter the response by using the OperationalData operator, specify a key-value pair by using the following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"}

" }, "MaxResults":{ "shape":"OpsItemMaxResults", @@ -5844,7 +5901,7 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS Resource Types Reference in the AWS CloudFormation User Guide.

" + "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", @@ -5852,7 +5909,7 @@ }, "AttachmentsInformation":{ "shape":"AttachmentInformationList", - "documentation":"

Details about the document attachments, including names, locations, sizes, etc.

" + "documentation":"

Details about the document attachments, including names, locations, sizes, and so on.

" }, "Requires":{ "shape":"DocumentRequiresList", @@ -5877,7 +5934,7 @@ "documentation":"

The value of the filter.

" } }, - "documentation":"

Describes a filter.

" + "documentation":"

This data type is deprecated. Instead, use DocumentKeyValuesFilter.

" }, "DocumentFilterKey":{ "type":"string", @@ -5953,7 +6010,7 @@ }, "TargetType":{ "shape":"TargetType", - "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS Resource Types Reference in the AWS CloudFormation User Guide.

" + "documentation":"

The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS resource and property types reference in the AWS CloudFormation User Guide.

" }, "Tags":{ "shape":"TagList", @@ -5982,7 +6039,7 @@ "documentation":"

The value for the filter key.

" } }, - "documentation":"

One or more filters. Use a filter to return a more specific list of documents.

For keys, you can specify one or more tags that have been applied to a document.

Other valid values include Owner, Name, PlatformTypes, and DocumentType.

Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self.

If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with Te, run the following command:

aws ssm list-documents --filters Key=Name,Values=Te

If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.

To specify a custom key and value pair, use the format Key=tag:[tagName],Values=[valueName].

For example, if you created a Key called region and are using the AWS CLI to call the list-documents command:

aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self

" + "documentation":"

One or more filters. Use a filter to return a more specific list of documents.

For keys, you can specify one or more tags that have been applied to a document.

Other valid values include Owner, Name, PlatformTypes, DocumentType, and TargetType.

Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self.

If you use Name as a key, you can use a name prefix to return a list of documents. For example, in the AWS CLI, to return a list of all documents that begin with Te, run the following command:

aws ssm list-documents --filters Key=Name,Values=Te

If you specify more than two keys, only documents that are identified by all the tags are returned in the results. If you specify more than two values for a key, documents that are identified by any of the values are returned in the results.

To specify a custom key and value pair, use the format Key=tag:tagName,Values=valueName.

For example, if you created a Key called region and are using the AWS CLI to call the list-documents command:

aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self

" }, "DocumentKeyValuesFilterKey":{ "type":"string", @@ -6184,7 +6241,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or Patch baseline, doesn't exist.

For information about resource quotas in Systems Manager, see Systems Manager Service Quotas in the AWS General Reference.

", + "documentation":"

Error returned when the ID specified for a resource, such as a maintenance window or Patch baseline, doesn't exist.

For information about resource quotas in Systems Manager, see Systems Manager service quotas in the AWS General Reference.

", "exception":true }, "DryRun":{"type":"boolean"}, @@ -6367,7 +6424,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

(Required) The ID of the managed instance targeted by the command. A managed instance can be an Amazon EC2 instance or an instance in your hybrid environment that is configured for Systems Manager.

" + "documentation":"

(Required) The ID of the managed instance targeted by the command. A managed instance can be an EC2 instance or an instance in your hybrid environment that is configured for Systems Manager.

" }, "PluginName":{ "shape":"CommandPluginName", @@ -6384,7 +6441,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The ID of the managed instance targeted by the command. A managed instance can be an Amazon EC2 instance or an instance in your hybrid environment that is configured for Systems Manager.

" + "documentation":"

The ID of the managed instance targeted by the command. A managed instance can be an EC2 instance or an instance in your hybrid environment that is configured for Systems Manager.

" }, "Comment":{ "shape":"Comment", @@ -6424,7 +6481,7 @@ }, "StatusDetails":{ "shape":"StatusDetails", - "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding Command Statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, the instance was stopped, etc. The system will try to deliver the command again.

  • Success: The command or plugin was run successfully. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to run on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't run successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" + "documentation":"

A detailed status of the command execution for an invocation. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the AWS Systems Manager User Guide. StatusDetails can be one of the following values:

  • Pending: The command has not been sent to the instance.

  • In Progress: The command has been sent to the instance but has not reached a terminal state.

  • Delayed: The system attempted to send the command to the target, but the target was not available. The instance might not be available because of network issues, because the instance was stopped, or for similar reasons. The system will try to send the command again.

  • Success: The command or plugin ran successfully. This is a terminal state.

  • Delivery Timed Out: The command was not delivered to the instance before the delivery timeout expired. Delivery timeouts do not count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Execution Timed Out: The command started to run on the instance, but the execution was not complete before the timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.

  • Failed: The command wasn't run successfully on the instance. For a plugin, this indicates that the result code was not zero. For a command invocation, this indicates that the result code for one or more plugins was not zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

  • Canceled: The command was terminated before it was completed. This is a terminal state.

  • Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

  • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.

" }, "StandardOutputContent":{ "shape":"StandardOutputContent", @@ -6432,7 +6489,7 @@ }, "StandardOutputUrl":{ "shape":"Url", - "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If an Amazon S3 bucket was not specified, then this string is empty.

" + "documentation":"

The URL for the complete text written by the plugin to stdout in Amazon S3. If an S3 bucket was not specified, then this string is empty.

" }, "StandardErrorContent":{ "shape":"StandardErrorContent", @@ -6541,7 +6598,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and cannot be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -6594,7 +6651,7 @@ }, "AttachmentsContent":{ "shape":"AttachmentContentList", - "documentation":"

A description of the document attachments, including names, locations, sizes, etc.

" + "documentation":"

A description of the document attachments, including names, locations, sizes, and so on.

" } } }, @@ -6911,6 +6968,11 @@ "shape":"MaintenanceWindowTimezone", "documentation":"

The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"etc/UTC\", or \"Asia/Seoul\". For more information, see the Time Zone Database on the IANA website.

" }, + "ScheduleOffset":{ + "shape":"MaintenanceWindowOffset", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "box":true + }, "NextExecutionTime":{ "shape":"MaintenanceWindowStringDateTime", "documentation":"

The next time the maintenance window will actually run, taking into account any specified times for the maintenance window to become active or inactive.

" @@ -7334,7 +7396,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The ID of the service setting to get.

" + "documentation":"

The ID of the service setting to get. The setting ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier.

" } }, "documentation":"

The request body of the GetServiceSetting API action.

" @@ -7354,10 +7416,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

" + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the AWS Systems Manager User Guide.

" } }, - "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

", + "documentation":"

A hierarchy can have a maximum of 15 levels. For more information, see Requirements and constraints for parameter names in the AWS Systems Manager User Guide.

", "exception":true }, "HierarchyTypeMismatchException":{ @@ -7365,10 +7427,10 @@ "members":{ "message":{ "shape":"String", - "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

" + "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

" } }, - "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

", + "documentation":"

Parameter Store does not support changing a parameter type in a hierarchy. For example, you can't change a parameter from a String type to a SecureString type. You must create a new, unique parameter.

", "exception":true }, "IPAddress":{ @@ -7459,20 +7521,20 @@ "members":{ "S3Location":{ "shape":"S3OutputLocation", - "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

" } }, - "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

" }, "InstanceAssociationOutputUrl":{ "type":"structure", "members":{ "S3OutputUrl":{ "shape":"S3OutputUrl", - "documentation":"

The URL of Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

The URL of the S3 bucket where you want to store the results of this request.

" } }, - "documentation":"

The URL of Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

The URL of the S3 bucket where you want to store the results of this request.

" }, "InstanceAssociationStatusAggregatedCount":{ "type":"map", @@ -7524,7 +7586,7 @@ }, "OutputUrl":{ "shape":"InstanceAssociationOutputUrl", - "documentation":"

A URL for an Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

A URL for an S3 bucket where you want to store the results of this request.

" }, "AssociationName":{ "shape":"AssociationName", @@ -7591,7 +7653,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"

The Amazon Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instances. This call does not return the IAM role for Amazon EC2 instances.

" + "documentation":"

The AWS Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instances. This call does not return the IAM role for EC2 instances.

" }, "RegistrationDate":{ "shape":"DateTime", @@ -7739,7 +7801,7 @@ }, "InstallOverrideList":{ "shape":"InstallOverrideList", - "documentation":"

An https URL or an Amazon S3 path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an Amazon S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline.

For more information about the InstallOverrideList parameter, see About the SSM Document AWS-RunPatchBaseline in the AWS Systems Manager User Guide.

" + "documentation":"

An https URL or an Amazon S3 path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline.

For more information about the InstallOverrideList parameter, see About the SSM document AWS-RunPatchBaseline in the AWS Systems Manager User Guide.

" }, "OwnerInformation":{ "shape":"OwnerInformation", @@ -8224,7 +8286,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The role name can't contain invalid characters. Also verify that you specified an IAM role for notifications that includes the required trust policy. For information about configuring the IAM role for Run Command notifications, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide.

", + "documentation":"

The role name can't contain invalid characters. Also verify that you specified an IAM role for notifications that includes the required trust policy. For information about configuring the IAM role for Run Command notifications, see Configuring Amazon SNS Notifications for Run Command in the AWS Systems Manager User Guide.

", "exception":true }, "InvalidSchedule":{ @@ -8240,7 +8302,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The target is not valid or does not exist. It might not be configured for EC2 Systems Manager or you might not have permission to perform the operation.

", + "documentation":"

The target is not valid or does not exist. It might not be configured for Systems Manager or you might not have permission to perform the operation.

", "exception":true }, "InvalidTypeNameException":{ @@ -8331,7 +8393,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the Delete Inventory Summary in the AWS Systems Manager User Guide.

" + "documentation":"

Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the AWS Systems Manager User Guide.

" }, "LastStatusUpdateTime":{ "shape":"InventoryDeletionLastStatusUpdateTime", @@ -8401,7 +8463,7 @@ }, "Type":{ "shape":"InventoryQueryOperatorType", - "documentation":"

The type of filter.

" + "documentation":"

The type of filter.

The Exists filter must be used with aggregators. For more information, see Aggregating inventory data in the AWS Systems Manager User Guide.

" } }, "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" @@ -8728,7 +8790,7 @@ "members":{ "InvalidLabels":{ "shape":"ParameterLabelList", - "documentation":"

The label does not meet the requirements. For information about parameter label requirements, see Labeling Parameters in the AWS Systems Manager User Guide.

" + "documentation":"

The label does not meet the requirements. For information about parameter label requirements, see Labeling parameters in the AWS Systems Manager User Guide.

" }, "ParameterVersion":{ "shape":"PSParameterVersion", @@ -8832,7 +8894,7 @@ }, "Filters":{ "shape":"CommandFilterList", - "documentation":"

(Optional) One or more filters. Use a filter to return a more specific list of results. Note that the DocumentName filter is not supported for ListCommandInvocations.

" + "documentation":"

(Optional) One or more filters. Use a filter to return a more specific list of results.

" }, "Details":{ "shape":"Boolean", @@ -8999,11 +9061,11 @@ "members":{ "DocumentFilterList":{ "shape":"DocumentFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + "documentation":"

This data type is deprecated. Instead, use Filters.

" }, "Filters":{ "shape":"DocumentKeyValuesFilterList", - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + "documentation":"

One or more DocumentKeyValuesFilter objects. Use a filter to return a more specific list of results. For keys, you can specify one or more key-value pair tags that have been applied to a document. Other valid keys include Owner, Name, PlatformTypes, DocumentType, and TargetType. For example, to return documents you own use Key=Owner,Values=Self. To specify a custom key-value pair, use the format Key=tag:tagName,Values=valueName.

" }, "MaxResults":{ "shape":"MaxResults", @@ -9185,18 +9247,18 @@ "members":{ "S3BucketName":{ "shape":"S3BucketName", - "documentation":"

The name of an Amazon S3 bucket where execution logs are stored .

" + "documentation":"

The name of an S3 bucket where execution logs are stored.

" }, "S3KeyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

(Optional) The Amazon S3 bucket subfolder.

" + "documentation":"

(Optional) The S3 bucket subfolder.

" }, "S3Region":{ "shape":"S3Region", - "documentation":"

The region where the Amazon S3 bucket is located.

" + "documentation":"

The Region where the S3 bucket is located.

" } }, - "documentation":"

Information about an Amazon S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

Information about an S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Long":{"type":"long"}, "MaintenanceWindowAllowUnassociatedTargets":{"type":"boolean"}, @@ -9485,6 +9547,11 @@ "shape":"MaintenanceWindowTimezone", "documentation":"

The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format.

" }, + "ScheduleOffset":{ + "shape":"MaintenanceWindowOffset", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "box":true + }, "EndDate":{ "shape":"MaintenanceWindowStringDateTime", "documentation":"

The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become inactive.

" @@ -9562,6 +9629,11 @@ "min":3, "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" }, + "MaintenanceWindowOffset":{ + "type":"integer", + "max":6, + "min":1 + }, "MaintenanceWindowResourceType":{ "type":"string", "enum":[ @@ -9595,11 +9667,11 @@ }, "OutputS3BucketName":{ "shape":"S3BucketName", - "documentation":"

The name of the Amazon S3 bucket.

" + "documentation":"

The name of the S3 bucket.

" }, "OutputS3KeyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

The Amazon S3 bucket subfolder.

" + "documentation":"

The S3 bucket subfolder.

" }, "Parameters":{ "shape":"Parameters", @@ -9728,7 +9800,7 @@ }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

Information about an Amazon S3 bucket to write task-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

Information about an S3 bucket to write task-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", @@ -9948,7 +10020,7 @@ }, "NotificationEvents":{ "shape":"NotificationEventList", - "documentation":"

The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Configuring Amazon SNS Notifications for AWS Systems Manager in the AWS Systems Manager User Guide.

" + "documentation":"

The different events for which you can receive notifications. These events include the following: All (events), InProgress, Success, TimedOut, Cancelled, Failed. To learn more about these events, see Monitoring Systems Manager status changes using Amazon SNS notifications in the AWS Systems Manager User Guide.

" }, "NotificationType":{ "shape":"NotificationType", @@ -9988,7 +10060,9 @@ "UBUNTU", "REDHAT_ENTERPRISE_LINUX", "SUSE", - "CENTOS" + "CENTOS", + "ORACLE_LINUX", + "DEBIAN" ] }, "OpsAggregator":{ @@ -10201,7 +10275,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

" + "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the AWS Systems Manager User Guide.

" }, "OpsItemId":{ "shape":"OpsItemId", @@ -10217,11 +10291,11 @@ }, "Source":{ "shape":"OpsItemSource", - "documentation":"

The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. The impacted resource is a subset of source.

" + "documentation":"

The origin of the OpsItem, such as Amazon EC2 or Systems Manager. The impacted resource is a subset of source.

" }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.

" + "documentation":"

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" }, "Category":{ "shape":"OpsItemCategory", @@ -10232,7 +10306,7 @@ "documentation":"

The severity of the OpsItem. Severity options range from 1 to 4.

" } }, - "documentation":"

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" + "documentation":"

Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

" }, "OpsItemAlreadyExistsException":{ "type":"structure", @@ -10362,7 +10436,7 @@ "LimitType":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

The request caused OpsItems to exceed one or more quotas. For information about OpsItem quotas, see What are the resource limits for OpsCenter?.

", + "documentation":"

The request caused OpsItems to exceed one or more quotas. For information about OpsItem quotas, see What are the resource limits for OpsCenter?.

", "exception":true }, "OpsItemMaxResults":{ @@ -10505,7 +10579,6 @@ "OpsResultAttributeList":{ "type":"list", "member":{"shape":"OpsResultAttribute"}, - "max":1, "min":1 }, "OutputSource":{ @@ -10513,7 +10586,7 @@ "members":{ "OutputSourceId":{ "shape":"OutputSourceId", - "documentation":"

The ID of the output source, for example the URL of an Amazon S3 bucket.

" + "documentation":"

The ID of the output source, for example the URL of an S3 bucket.

" }, "OutputSourceType":{ "shape":"OutputSourceType", @@ -10555,7 +10628,7 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter. Valid values include the following: String, String list, Secure string.

" + "documentation":"

The type of parameter. Valid values include the following: String, StringList, and SecureString.

" }, "Value":{ "shape":"PSParameterValue", @@ -10580,9 +10653,13 @@ "ARN":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the parameter.

" + }, + "DataType":{ + "shape":"ParameterDataType", + "documentation":"

The data type of the parameter, such as text or aws:ec2:image. The default is text.

" } }, - "documentation":"

An Amazon EC2 Systems Manager parameter in Parameter Store.

" + "documentation":"

A Systems Manager parameter in Parameter Store.

" }, "ParameterAlreadyExists":{ "type":"structure", @@ -10592,6 +10669,11 @@ "documentation":"

The parameter already exists. You can't create duplicate parameters.

", "exception":true }, + "ParameterDataType":{ + "type":"string", + "max":128, + "min":0 + }, "ParameterDescription":{ "type":"string", "max":1024, @@ -10646,7 +10728,11 @@ }, "Policies":{ "shape":"ParameterPolicyList", - "documentation":"

Information about the policies assigned to a parameter.

Working with Parameter Policies in the AWS Systems Manager User Guide.

" + "documentation":"

Information about the policies assigned to a parameter.

Assigning parameter policies in the AWS Systems Manager User Guide.

" + }, + "DataType":{ + "shape":"ParameterDataType", + "documentation":"

The data type of the parameter, such as text or aws:ec2:image. The default is text.

" } }, "documentation":"

Information about parameter usage.

" @@ -10719,7 +10805,7 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter. Valid parameter types include the following: String, String list, Secure string.

" + "documentation":"

The type of parameter. Valid parameter types include the following: String, StringList, and SecureString.

" }, "KeyId":{ "shape":"ParameterKeyId", @@ -10752,6 +10838,10 @@ "Policies":{ "shape":"ParameterPolicyList", "documentation":"

A list of policies associated with a parameter.

" + }, + "DataType":{ + "shape":"ParameterDataType", + "documentation":"

The data type of the parameter, such as text or aws:ec2:image. The default is text.

" } }, "documentation":"

Metadata includes information like the ARN of the last user and the date/time the parameter was last used.

" @@ -10812,13 +10902,13 @@ "documentation":"

The value you want to search for.

" } }, - "documentation":"

One or more filters. Use a filter to return a more specific list of results.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API actions. However, not all of the pattern values listed for Key can be used with both actions.

For DescribeActions, all of the listed patterns are valid, with the exception of Label.

For GetParametersByPath, the following patterns listed for Key are not valid: Name, Path, and Tier.

For examples of CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager Parameters in the AWS Systems Manager User Guide.

" + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API actions. However, not all of the pattern values listed for Key can be used with both actions.

For DescribeParameters, all of the listed patterns are valid, with the exception of Label.

For GetParametersByPath, the following patterns listed for Key are not valid: Name, Path, and Tier.

For examples of CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager parameters in the AWS Systems Manager User Guide.

" }, "ParameterStringFilterKey":{ "type":"string", "max":132, "min":1, - "pattern":"tag:.+|Name|Type|KeyId|Path|Label|Tier" + "pattern":"tag:.+|Name|Type|KeyId|Path|Label|Tier|DataType" }, "ParameterStringFilterList":{ "type":"list", @@ -11053,7 +11143,7 @@ }, "State":{ "shape":"PatchComplianceDataState", - "documentation":"

The state of the patch on the instance, such as INSTALLED or FAILED.

For descriptions of each patch state, see About Patch Compliance in the AWS Systems Manager User Guide.

" + "documentation":"

The state of the patch on the instance, such as INSTALLED or FAILED.

For descriptions of each patch state, see About patch compliance in the AWS Systems Manager User Guide.

" }, "InstalledTime":{ "shape":"DateTime", @@ -11292,16 +11382,16 @@ }, "ComplianceLevel":{ "shape":"PatchComplianceLevel", - "documentation":"

A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.

" + "documentation":"

A compliance severity level for all approved patches in a patch baseline.

" }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", - "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released.

", + "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. Not supported on Ubuntu Server.

", "box":true }, "ApproveUntilDate":{ "shape":"PatchStringDateTime", - "documentation":"

Example API

", + "documentation":"

The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Ubuntu Server.

Enter dates in the format YYYY-MM-DD. For example, 2020-12-31.

", "box":true }, "EnableNonSecurity":{ @@ -11495,11 +11585,16 @@ }, "Items":{ "shape":"ComplianceItemEntryList", - "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, etc.

" + "documentation":"

Information about the compliance as defined by the resource type. For example, for a patch compliance type, Items includes information about the PatchSeverity, Classification, and so on.

" }, "ItemContentHash":{ "shape":"ComplianceItemContentHash", "documentation":"

MD5 or SHA-256 content hash. The content hash is used to determine if existing information should be overwritten or ignored. If the content hashes match, the request to put compliance information is ignored.

" + }, + "UploadType":{ + "shape":"ComplianceUploadType", + "documentation":"

The mode for uploading compliance items. You can specify COMPLETE or PARTIAL. In COMPLETE mode, the system overwrites all existing compliance information for the resource. You must provide a full list of compliance items each time you send the request.

In PARTIAL mode, the system overwrites compliance information for a specific association. The association must be configured with SyncCompliance set to MANUAL. By default, all requests use COMPLETE mode.

This attribute is only valid for association compliance.

", + "box":true } } }, @@ -11539,13 +11634,12 @@ "type":"structure", "required":[ "Name", - "Value", - "Type" + "Value" ], "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Requirements and Constraints for Parameter Names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" + "documentation":"

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an AWS Region.

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-/

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see About requirements and constraints for parameter names in the AWS Systems Manager User Guide.

The maximum length constraint listed below includes capacity for additional system attributes that are not part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters:

arn:aws:ssm:us-east-2:111122223333:parameter/ExampleParameterName

" }, "Description":{ "shape":"ParameterDescription", @@ -11557,7 +11651,7 @@ }, "Type":{ "shape":"ParameterType", - "documentation":"

The type of parameter that you want to add to the system.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

SecureString is not currently supported for AWS CloudFormation templates or in the China Regions.

" + "documentation":"

The type of parameter that you want to add to the system.

SecureString is not currently supported for AWS CloudFormation templates or in the China Regions.

Items in a StringList must be separated by a comma (,). You can't use other punctuation or special characters to escape items in the list. If you have a parameter value that requires a comma, then use the String data type.

Specifying a parameter type is not required when updating a parameter. You must specify a parameter type when creating a parameter.

" }, "KeyId":{ "shape":"ParameterKeyId", @@ -11578,11 +11672,15 @@ }, "Tier":{ "shape":"ParameterTier", - "documentation":"

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an AWS account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an AWS account. Advanced parameters incur a charge. For more information, see About Advanced Parameters in the AWS Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you do not specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

  • Advanced: With this option, Parameter Store evaluates all requests as advanced parameters.

  • Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced.

    If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store create a parameter in the advanced-parameter tier.

    This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary.

Options that require an advanced parameter include the following:

  • The content size of the parameter is more than 4 KB.

  • The parameter uses a parameter policy.

  • More than 10,000 parameters already exist in your AWS account in the current Region.

For more information about configuring the default tier option, see Specifying a Default Parameter Tier in the AWS Systems Manager User Guide.

" + "documentation":"

The parameter tier to assign to a parameter.

Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an AWS account. Standard parameters are offered at no additional cost.

Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an AWS account. Advanced parameters incur a charge. For more information, see Standard and advanced parameter tiers in the AWS Systems Manager User Guide.

You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters.

If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter.

Using the Default Tier Configuration

In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you do not specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration.

The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default:

  • Advanced: With this option, Parameter Store evaluates all requests as advanced parameters.

  • Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced.

    If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store creates a parameter in the advanced-parameter tier.

    This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary.

Options that require an advanced parameter include the following:

  • The content size of the parameter is more than 4 KB.

  • The parameter uses a parameter policy.

  • More than 10,000 parameters already exist in your AWS account in the current Region.

For more information about configuring the default tier option, see Specifying a default parameter tier in the AWS Systems Manager User Guide.

" }, "Policies":{ "shape":"ParameterPolicies", - "documentation":"

One or more policies to apply to a parameter. This action takes a JSON array. Parameter Store supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter does not affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy triggers an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy triggers a CloudWatch event if a parameter has not been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it has not been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Working with Parameter Policies.

" + "documentation":"

One or more policies to apply to a parameter. This action takes a JSON array. Parameter Store supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter does not affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy triggers an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy triggers a CloudWatch event if a parameter has not been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it has not been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.

" + }, + "DataType":{ + "shape":"ParameterDataType", + "documentation":"

The data type for a String parameter. Supported data types include plain text and Amazon Machine Image IDs.

The following data type values are supported.

  • text

  • aws:ec2:image

When you create a String parameter and specify aws:ec2:image, Systems Manager validates that the parameter value is in the required format, such as ami-12345abcdeEXAMPLE, and that the specified AMI is available in your AWS account. For more information, see Native parameter support for Amazon Machine Image IDs in the AWS Systems Manager User Guide.

" } } }, @@ -11680,7 +11778,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these examples formats, including the best use case for each one, see Examples: Register Targets with a Maintenance Window in the AWS Systems Manager User Guide.

" + "documentation":"

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these example formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the AWS Systems Manager User Guide.

" }, "OwnerInformation":{ "shape":"OwnerInformation", @@ -11735,7 +11833,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the in the AWS Systems Manager User Guide:

" + "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the AWS Systems Manager User Guide:

" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -11764,7 +11862,7 @@ }, "LoggingInfo":{ "shape":"LoggingInfo", - "documentation":"

A structure containing information about an Amazon S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" + "documentation":"

A structure containing information about an S3 bucket to write instance-level logs to.

LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters.

" }, "Name":{ "shape":"MaintenanceWindowName", @@ -11849,7 +11947,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The ID of the service setting to reset.

" + "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled.

" } }, "documentation":"

The request body of the ResetServiceSetting API action.

" @@ -11976,7 +12074,7 @@ "documentation":"

The sharing data type. Only Organization is supported.

" } }, - "documentation":"

Synchronize Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized Amazon S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.

" + "documentation":"

Synchronize Systems Manager Inventory data from multiple AWS accounts defined in AWS Organizations to a centralized S3 bucket. Data is synchronized to individual key prefixes in the central bucket. Each key prefix represents a different AWS account ID.

" }, "ResourceDataSyncDestinationDataSharingType":{ "type":"string", @@ -12001,7 +12099,7 @@ }, "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType is SyncFromSource then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.

" + "documentation":"

The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an S3 bucket. If the SyncType is SyncFromSource then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.

" }, "SyncSource":{ "shape":"ResourceDataSyncSourceWithState", @@ -12009,7 +12107,7 @@ }, "S3Destination":{ "shape":"ResourceDataSyncS3Destination", - "documentation":"

Configuration information for the target Amazon S3 bucket.

" + "documentation":"

Configuration information for the target S3 bucket.

" }, "LastSyncTime":{ "shape":"LastResourceDataSyncTime", @@ -12100,7 +12198,7 @@ "members":{ "BucketName":{ "shape":"ResourceDataSyncS3BucketName", - "documentation":"

The name of the Amazon S3 bucket where the aggregated data is stored.

" + "documentation":"

The name of the S3 bucket where the aggregated data is stored.

" }, "Prefix":{ "shape":"ResourceDataSyncS3Prefix", @@ -12112,18 +12210,18 @@ }, "Region":{ "shape":"ResourceDataSyncS3Region", - "documentation":"

The AWS Region with the Amazon S3 bucket targeted by the Resource Data Sync.

" + "documentation":"

The AWS Region with the S3 bucket targeted by the Resource Data Sync.

" }, "AWSKMSKeyARN":{ "shape":"ResourceDataSyncAWSKMSKeyARN", - "documentation":"

The ARN of an encryption key for a destination in Amazon S3. Must belong to the same Region as the destination Amazon S3 bucket.

" + "documentation":"

The ARN of an encryption key for a destination in Amazon S3. Must belong to the same Region as the destination S3 bucket.

" }, "DestinationDataSharing":{ "shape":"ResourceDataSyncDestinationDataSharing", "documentation":"

Enables destination data sharing. By default, this field is null.

" } }, - "documentation":"

Information about the target Amazon S3 bucket for the Resource Data Sync.

" + "documentation":"

Information about the target S3 bucket for the Resource Data Sync.

" }, "ResourceDataSyncS3Format":{ "type":"string", @@ -12229,7 +12327,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

For information about resource quotas in Systems Manager, see Systems Manager Service Quotas in the AWS General Reference.

", + "documentation":"

Error returned when the caller has exceeded the default resource quotas. For example, too many maintenance windows or patch baselines have been created.

For information about resource quotas in Systems Manager, see Systems Manager service quotas in the AWS General Reference.

", "exception":true }, "ResourceType":{ @@ -12292,7 +12390,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager Service Endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -12310,28 +12408,28 @@ "members":{ "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", - "documentation":"

The name of the Amazon S3 bucket.

" + "documentation":"

The name of the S3 bucket.

" }, "OutputS3KeyPrefix":{ "shape":"S3KeyPrefix", - "documentation":"

The Amazon S3 bucket subfolder.

" + "documentation":"

The S3 bucket subfolder.

" } }, - "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

" }, "S3OutputUrl":{ "type":"structure", "members":{ "OutputUrl":{ "shape":"Url", - "documentation":"

A URL for an Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

A URL for an S3 bucket where you want to store the results of this request.

" } }, - "documentation":"

A URL for the Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

A URL for the S3 bucket where you want to store the results of this request.

" }, "S3Region":{ "type":"string", @@ -12397,11 +12495,11 @@ "members":{ "InstanceIds":{ "shape":"InstanceIdList", - "documentation":"

The instance IDs where the command should run. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide.

" + "documentation":"

The instance IDs where the command should run. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the AWS Systems Manager User Guide.

" }, "Targets":{ "shape":"Targets", - "documentation":"

(Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use targets, see Sending Commands to a Fleet in the AWS Systems Manager User Guide.

" + "documentation":"

(Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use targets, see Sending commands to a fleet in the AWS Systems Manager User Guide.

" }, "DocumentName":{ "shape":"DocumentARN", @@ -12434,7 +12532,7 @@ }, "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.

" + "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -12446,11 +12544,11 @@ }, "MaxConcurrency":{ "shape":"MaxConcurrency", - "documentation":"

(Optional) The maximum number of instances that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using Concurrency Controls in the AWS Systems Manager User Guide.

" + "documentation":"

(Optional) The maximum number of instances that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Using concurrency controls in the AWS Systems Manager User Guide.

" }, "MaxErrors":{ "shape":"MaxErrors", - "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the systems stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using Error Controls in the AWS Systems Manager User Guide.

" + "documentation":"

The maximum number of errors allowed without the command failing. When the command fails one more time beyond the value of MaxErrors, the system stops sending the command to additional targets. You can specify a number like 10 or a percentage like 10%. The default value is 0. For more information about how to use MaxErrors, see Using error controls in the AWS Systems Manager User Guide.

" }, "ServiceRoleArn":{ "shape":"ServiceRole", @@ -12818,7 +12916,7 @@ }, "TargetLocations":{ "shape":"TargetLocations", - "documentation":"

A location is a combination of AWS Regions and/or AWS accounts where you want to run the Automation. Use this action to start an Automation in multiple Regions and multiple accounts. For more information, see Executing Automations in Multiple AWS Regions and Accounts in the AWS Systems Manager User Guide.

", + "documentation":"

A location is a combination of AWS Regions and/or AWS accounts where you want to run the Automation. Use this action to start an Automation in multiple Regions and multiple accounts. For more information, see Running Automation workflows in multiple AWS Regions and accounts in the AWS Systems Manager User Guide.

", "box":true }, "Tags":{ @@ -12846,7 +12944,7 @@ }, "DocumentName":{ "shape":"DocumentARN", - "documentation":"

The name of the SSM document to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell. If no document name is provided, a shell to the instance is launched by default.

" + "documentation":"

The name of the SSM document to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session. If no document name is provided, a shell to the instance is launched by default.

" }, "Parameters":{ "shape":"SessionManagerParameters", @@ -12867,7 +12965,7 @@ }, "StreamUrl":{ "shape":"StreamUrl", - "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager Service Endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" + "documentation":"

A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)

region represents the Region identifier for an AWS Region supported by AWS Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list of supported region values, see the Region column in Systems Manager service endpoints in the AWS General Reference.

session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE.

" } } }, @@ -13132,10 +13230,10 @@ }, "Values":{ "shape":"TargetValues", - "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include Amazon EC2 tags of ServerRole,WebServer.

" + "documentation":"

User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to run a command on instances that include EC2 tags of ServerRole,WebServer.

" } }, - "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify.

Supported formats include the following.

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

  • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

  • (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For example:

  • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

  • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

  • Key=tag-key,Values=Name,Instance-Type,CostCenter

  • (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup

    This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

    This example demonstrates how to target only Amazon EC2 instances and VPCs in your maintenance window.

  • (State Manager association targets only) Key=InstanceIds,Values=*

    This example demonstrates how to target all managed instances in the AWS Region where the association was created.

For information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.

" + "documentation":"

An array of search criteria that targets instances using a Key,Value combination that you specify.

Supported formats include the following.

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

  • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

  • (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For example:

  • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

  • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

  • Key=tag-key,Values=Name,Instance-Type,CostCenter

  • (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup

    This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

  • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

    This example demonstrates how to target only EC2 instances and VPCs in your maintenance window.

  • (State Manager association targets only) Key=InstanceIds,Values=*

    This example demonstrates how to target all managed instances in the AWS Region where the association was created.

For information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide.

" }, "TargetCount":{"type":"integer"}, "TargetInUseException":{ @@ -13221,7 +13319,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The specified target instance for the session is not fully configured for use with Session Manager. For more information, see Getting Started with Session Manager in the AWS Systems Manager User Guide.

", + "documentation":"

The specified target instance for the session is not fully configured for use with Session Manager. For more information, see Getting started with Session Manager in the AWS Systems Manager User Guide.

", "exception":true }, "TargetParameterList":{ @@ -13312,7 +13410,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

Microsoft application patching is only available on EC2 instances and Advanced Instances. To patch Microsoft applications on on-premises servers and VMs, you must enable Advanced Instances. For more information, see Using the Advanced-Instances Tier in the AWS Systems Manager User Guide.

", + "documentation":"

Microsoft application patching is only available on EC2 instances and advanced instances. To patch Microsoft applications on on-premises servers and VMs, you must enable advanced instances. For more information, see Using the advanced-instances tier in the AWS Systems Manager User Guide.

", "exception":true }, "UnsupportedInventoryItemContextException":{ @@ -13378,7 +13476,7 @@ }, "OutputLocation":{ "shape":"InstanceAssociationOutputLocation", - "documentation":"

An Amazon S3 bucket where you want to store the results of this request.

" + "documentation":"

An S3 bucket where you want to store the results of this request.

" }, "Name":{ "shape":"DocumentARN", @@ -13411,6 +13509,14 @@ "ComplianceSeverity":{ "shape":"AssociationComplianceSeverity", "documentation":"

The severity level to assign to the association.

" + }, + "SyncCompliance":{ + "shape":"AssociationSyncCompliance", + "documentation":"

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.

By default, all associations use AUTO mode.

" + }, + "ApplyOnlyAtCronInterval":{ + "shape":"ApplyOnlyAtCronInterval", + "documentation":"

By default, when you update an association, the system runs it immediately after it is updated and then according to the schedule you specified. Specify this option if you don't want an association to run immediately after you update it.

Also, if you specified this option when you created the association, you can reset it. To do so, specify the no-apply-only-at-cron-interval parameter when you update the association from the command line. This parameter forces the association to run immediately after updating it and according to the interval specified.

" } } }, @@ -13505,7 +13611,7 @@ }, "DocumentVersion":{ "shape":"DocumentVersion", - "documentation":"

(Required) The version of the document that you want to update.

" + "documentation":"

(Required) The latest version of the document that you want to update. The latest document version can be specified using the $LATEST variable or by the version number. Updating a previous version of a document is not supported.

" }, "DocumentFormat":{ "shape":"DocumentFormat", @@ -13558,6 +13664,11 @@ "shape":"MaintenanceWindowTimezone", "documentation":"

The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"etc/UTC\", or \"Asia/Seoul\". For more information, see the Time Zone Database on the IANA website.

" }, + "ScheduleOffset":{ + "shape":"MaintenanceWindowOffset", + "documentation":"

The number of days to wait after the date and time specified by a CRON expression before running the maintenance window.

For example, the following cron expression schedules a maintenance window to run the third Tuesday of every month at 11:30 PM.

cron(0 30 23 ? * TUE#3 *)

If the schedule offset is 2, the maintenance window won't run until two days later.

", + "box":true + }, "Duration":{ "shape":"MaintenanceWindowDurationHours", "documentation":"

The duration of the maintenance window in hours.

", @@ -13616,6 +13727,11 @@ "shape":"MaintenanceWindowTimezone", "documentation":"

The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format. For example: \"America/Los_Angeles\", \"etc/UTC\", or \"Asia/Seoul\". For more information, see the Time Zone Database on the IANA website.

" }, + "ScheduleOffset":{ + "shape":"MaintenanceWindowOffset", + "documentation":"

The number of days to wait to run a maintenance window after the scheduled CRON expression date and time.

", + "box":true + }, "Duration":{ "shape":"MaintenanceWindowDurationHours", "documentation":"

The duration of the maintenance window in hours.

" @@ -13726,7 +13842,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the in the AWS Systems Manager User Guide:

" + "documentation":"

The ARN of the IAM service role for Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses your account's service-linked role. If no service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.

For more information, see the following topics in the AWS Systems Manager User Guide:

" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -13857,7 +13973,7 @@ }, "OperationalData":{ "shape":"OpsItemOperationalData", - "documentation":"

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems Manually in the AWS Systems Manager User Guide.

" + "documentation":"

Add new keys or edit existing key-value pairs of the OperationalData map in the OpsItem object.

Operational data is custom data that provides useful reference details about the OpsItem. For example, you can specify log files, error strings, license keys, troubleshooting tips, or other relevant data. You enter operational data as key-value pairs. The key has a maximum length of 128 characters. The value has a maximum size of 20 KB.

Operational data keys can't begin with the following: amazon, aws, amzn, ssm, /amazon, /aws, /amzn, /ssm.

You can choose to make the data searchable by other users in the account or you can restrict search access. Searchable data means that all users with access to the OpsItem Overview page (as provided by the DescribeOpsItems API action) can view and search on the specified data. Operational data that is not searchable is only viewable by users who have access to the OpsItem (as provided by the GetOpsItem API action).

Use the /aws/resources key in OperationalData to specify a related resource in the request. Use the /aws/automations key in OperationalData to associate an Automation runbook with the OpsItem. To view AWS CLI example commands that use these keys, see Creating OpsItems manually in the AWS Systems Manager User Guide.

" }, "OperationalDataToDelete":{ "shape":"OpsItemOpsDataKeysList", @@ -13877,7 +13993,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem Details in the AWS Systems Manager User Guide.

" + "documentation":"

The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the AWS Systems Manager User Guide.

" }, "OpsItemId":{ "shape":"OpsItemId", @@ -13924,7 +14040,7 @@ }, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly approved patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -13937,7 +14053,7 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see Package Name Formats for Approved and Rejected Patch Lists in the AWS Systems Manager User Guide.

" + "documentation":"

A list of explicitly rejected patches for the baseline.

For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the AWS Systems Manager User Guide.

" }, "RejectedPatchesAction":{ "shape":"PatchAction", @@ -14034,7 +14150,7 @@ }, "SyncType":{ "shape":"ResourceDataSyncType", - "documentation":"

The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType is SyncFromSource then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.

" + "documentation":"

The type of resource data sync. The supported SyncType is SyncFromSource.

" }, "SyncSource":{ "shape":"ResourceDataSyncSource", @@ -14056,11 +14172,11 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"

The ID of the service setting to update.

" + "documentation":"

The Amazon Resource Name (ARN) of the service setting to reset. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. The setting ID can be one of the following.

  • /ssm/parameter-store/default-parameter-tier

  • /ssm/parameter-store/high-throughput-enabled

  • /ssm/managed-instance/activation-tier

" }, "SettingValue":{ "shape":"ServiceSettingValue", - "documentation":"

The new value to specify for the service setting.

" + "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

  • Standard

  • Advanced

  • Intelligent-Tiering

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

" } }, "documentation":"

The request body of the UpdateServiceSetting API action.

" @@ -14086,5 +14202,5 @@ "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" } }, - "documentation":"AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon EC2 instance or on-premises machine in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Setting Up AWS Systems Manager in the AWS Systems Manager User Guide.

For information about other API actions you can perform on Amazon EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API Requests.

" + "documentation":"AWS Systems Manager

AWS Systems Manager is a collection of capabilities that helps you automate management tasks such as collecting system inventory, applying operating system (OS) patches, automating the creation of Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. Systems Manager lets you remotely and securely manage the configuration of your managed instances. A managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any on-premises server or virtual machine (VM) in your hybrid environment that has been configured for Systems Manager.

This reference is intended to be used with the AWS Systems Manager User Guide.

To get started, verify prerequisites and configure managed instances. For more information, see Setting up AWS Systems Manager in the AWS Systems Manager User Guide.

For information about other API actions you can perform on EC2 instances, see the Amazon EC2 API Reference. For information about how to use a Query API, see Making API requests.

" } diff --git a/services/sso/pom.xml b/services/sso/pom.xml index e7c5ff84a308..80001b9238f2 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index c1322b3a98e8..5d4a62e738b7 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 231c2f150b20..d819a5635058 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/storagegateway/src/main/resources/codegen-resources/service-2.json b/services/storagegateway/src/main/resources/codegen-resources/service-2.json index 0ecdfba9907f..2bb4f9ffd83c 100644 --- a/services/storagegateway/src/main/resources/codegen-resources/service-2.json +++ b/services/storagegateway/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" + "documentation":"

Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the AWS Region that you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account. For more information, see UpdateGatewayInformation.

You must turn on the gateway VM before you can activate your gateway.

" }, "AddCache":{ "name":"AddCache", @@ -38,7 +38,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape and file gateway type (see Storage Gateway Concepts).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

" + "documentation":"

Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape, and file gateway type (see How AWS Storage Gateway works (architecture)).

In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.

" }, "AddTagsToResource":{ "name":"AddTagsToResource", @@ -94,7 +94,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

Assigns a tape to a tape pool for archiving. The tape assigned to a pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "AttachVolume":{ "name":"AttachVolume", @@ -150,7 +150,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.

Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The VolumeSizeInBytes value must be equal to or larger than the size of the copied volume, in bytes.

" + "documentation":"

Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.

Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.

In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The VolumeSizeInBytes value must be equal to or larger than the size of the copied volume, in bytes.

" }, "CreateNFSFileShare":{ "name":"CreateNFSFileShare", @@ -164,7 +164,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation is only supported for file gateways.

File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in the AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateway does not support creating hard or symbolic links on a file share.

" }, "CreateSMBFileShare":{ "name":"CreateSMBFileShare", @@ -178,7 +178,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway expose file shares using a SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + "documentation":"

Creates a Server Message Block (SMB) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" }, "CreateSnapshot":{ "name":"CreateSnapshot", @@ -193,7 +193,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableError"} ], - "documentation":"

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage (S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take ad-hoc snapshot. For more information, see Editing a Snapshot Schedule.

In the CreateSnapshot request you identify the volume by providing its Amazon Resource Name (ARN). You must also provide description for the snapshot. When AWS Storage Gateway takes the snapshot of specified volume, the snapshot and description appears in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the EC2 API reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

" + "documentation":"

Initiates a snapshot of a volume.

AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see Editing a snapshot schedule.

In the CreateSnapshot request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes the snapshot of the specified volume, the snapshot and description appear in the AWS Storage Gateway Console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the Welcome page.

" }, "CreateSnapshotFromVolumeRecoveryPoint":{ "name":"CreateSnapshotFromVolumeRecoveryPoint", @@ -208,7 +208,7 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableError"} ], - "documentation":"

Initiates a snapshot of a gateway from a volume recovery point. This operation is only supported in the cached volume gateway type.

A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery point for cached volume gateway, use ListVolumeRecoveryPoints.

In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, the gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, in Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

Initiates a snapshot of a gateway from a volume recovery point. This operation is only supported in the cached volume gateway type.

A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot. To get a list of volume recovery points for cached volume gateway, use ListVolumeRecoveryPoints.

In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and its description appear in the AWS Storage Gateway console. In response, the gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the Amazon Elastic Compute Cloud API Reference.

" }, "CreateStorediSCSIVolume":{ "name":"CreateStorediSCSIVolume", @@ -236,7 +236,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and can not be reused if it has already been used on a tape . This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" + "documentation":"

Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

" }, "CreateTapes":{ "name":"CreateTapes", @@ -250,7 +250,21 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

" + "documentation":"

Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

" + }, + "DeleteAutomaticTapeCreationPolicy":{ + "name":"DeleteAutomaticTapeCreationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAutomaticTapeCreationPolicyInput"}, + "output":{"shape":"DeleteAutomaticTapeCreationPolicyOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Deletes the automatic tape creation policy of a gateway. If you delete this policy, new virtual tapes must be created manually. Use the Amazon Resource Name (ARN) of the gateway in your request to remove the policy.

" }, "DeleteBandwidthRateLimit":{ "name":"DeleteBandwidthRateLimit", @@ -306,7 +320,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

" + "documentation":"

Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway detail page.

" }, "DeleteSnapshotSchedule":{ "name":"DeleteSnapshotSchedule", @@ -320,7 +334,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

Deletes a snapshot of a volume.

You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Backing up your volumes. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

To list or delete a snapshot, you must use the Amazon EC2 API. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "DeleteTape":{ "name":"DeleteTape", @@ -362,7 +376,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

" + "documentation":"

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

" }, "DescribeAvailabilityMonitorTest":{ "name":"DescribeAvailabilityMonitorTest", @@ -390,7 +404,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume and tape gateway types.'

This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

" + "documentation":"

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume, and tape gateway types.

This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, "DescribeCache":{ "name":"DescribeCache", @@ -404,7 +418,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape and file gateway types.

The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

" + "documentation":"

Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape, and file gateway types.

The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

" }, "DescribeCachediSCSIVolumes":{ "name":"DescribeCachediSCSIVolumes", @@ -418,7 +432,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

" + "documentation":"

Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

" }, "DescribeChapCredentials":{ "name":"DescribeChapCredentials", @@ -530,7 +544,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

" + "documentation":"

Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.

" }, "DescribeTapeArchives":{ "name":"DescribeTapeArchives", @@ -586,7 +600,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume and tape gateway types.

The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

" + "documentation":"

Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume, and tape gateway types.

The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

" }, "DescribeVTLDevices":{ "name":"DescribeVTLDevices", @@ -642,7 +656,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

Once a gateway is disabled it cannot be enabled.

" + "documentation":"

Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

After a gateway is disabled, it cannot be enabled.

" }, "JoinDomain":{ "name":"JoinDomain", @@ -658,6 +672,20 @@ ], "documentation":"

Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol.

" }, + "ListAutomaticTapeCreationPolicies":{ + "name":"ListAutomaticTapeCreationPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAutomaticTapeCreationPoliciesInput"}, + "output":{"shape":"ListAutomaticTapeCreationPoliciesOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Lists the automatic tape creation policies for a gateway. If there are no automatic tape creation policies for the gateway, it returns an empty list.

This operation is only supported for tape gateways.

" + }, "ListFileShares":{ "name":"ListFileShares", "http":{ @@ -782,7 +810,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

" + "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or an AWS Lambda function. This operation is only supported for file gateways.

For more information, see Getting file upload notification in the AWS Storage Gateway User Guide.

" }, "RefreshCache":{ "name":"RefreshCache", @@ -796,7 +824,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through an CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting Notified About File Operations.

If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

For more information, see \"https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification\".

" + "documentation":"

Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

For more information, see Getting notified about file operations in the AWS Storage Gateway User Guide.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -824,7 +852,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Resets all cache disks that have encountered a error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters a error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.

If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

" + "documentation":"

Resets all cache disks that have encountered an error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters an error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point, you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.

If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

" }, "RetrieveTapeArchive":{ "name":"RetrieveTapeArchive", @@ -908,7 +936,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test and that a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

Starting this test will cause your gateway to go offline for a brief period.

" + "documentation":"

Starts a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test; a successful response only indicates that the test was started. It doesn't indicate that the test passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

Starting this test will cause your gateway to go offline for a brief period.

" }, "StartGateway":{ "name":"StartGateway", @@ -924,6 +952,20 @@ ], "documentation":"

Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can then make other API calls, your applications can read from or write to the gateway's storage volumes and you will be able to take snapshot backups.

When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, + "UpdateAutomaticTapeCreationPolicy":{ + "name":"UpdateAutomaticTapeCreationPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAutomaticTapeCreationPolicyInput"}, + "output":{"shape":"UpdateAutomaticTapeCreationPolicyOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Updates the automatic tape creation policy of a gateway. Use this to update the policy with a new set of automatic tape creation rules. This is only supported for tape gateways.

By default, there is no automatic tape creation policy.

A gateway can have only one automatic tape creation policy.

" + }, "UpdateBandwidthRateLimit":{ "name":"UpdateBandwidthRateLimit", "http":{ @@ -936,7 +978,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume and tape gateway types.'

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

" + "documentation":"

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume, and tape gateway types.

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

" }, "UpdateChapCredentials":{ "name":"UpdateChapCredentials", @@ -978,7 +1020,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.

" + "documentation":"

Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing your Windows iSCSI settings and Customizing your Linux iSCSI settings, respectively.

" }, "UpdateMaintenanceStartTime":{ "name":"UpdateMaintenanceStartTime", @@ -1020,7 +1062,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" + "documentation":"

Updates a Server Message Block (SMB) file share.

To leave a file share field unchanged, set the corresponding input field to null. This operation is only supported for file gateways.

File gateways require AWS Security Token Service (AWS STS) to be activated to enable you to create a file share. Make sure that AWS STS is activated in the AWS Region you are creating your file gateway in. If AWS STS is not activated in this AWS Region, activate it. For information about how to activate AWS STS, see Activating and deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

File gateways don't support creating hard or symbolic links on a file share.

" }, "UpdateSMBSecurityStrategy":{ "name":"UpdateSMBSecurityStrategy", @@ -1077,7 +1119,7 @@ "members":{ "ActivationKey":{ "shape":"ActivationKey", - "documentation":"

Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html in the Storage Gateway User Guide.

" + "documentation":"

Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters; however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

For more information, see Getting activation key in the AWS Storage Gateway User Guide.

" }, "GatewayName":{ "shape":"GatewayName", @@ -1089,26 +1131,26 @@ }, "GatewayRegion":{ "shape":"RegionId", - "documentation":"

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints in the Amazon Web Services Glossary.

Valid Values: See AWS Storage Gateway Regions and Endpoints in the AWS General Reference.

" + "documentation":"

A value that indicates the AWS Region where you want to store your data. The gateway AWS Region specified must be the same AWS Region as the AWS Region in your Host header in the request. For more information about available AWS Regions and endpoints for AWS Storage Gateway, see AWS Storage Gateway endpoints and quotas in the AWS General Reference.

Valid Values: See AWS Storage Gateway endpoints and quotas in the AWS General Reference.

" }, "GatewayType":{ "shape":"GatewayType", - "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: \"STORED\", \"CACHED\", \"VTL\", \"FILE_S3\"

" + "documentation":"

A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED.

Valid Values: STORED | CACHED | VTL | FILE_S3

" }, "TapeDriveType":{ "shape":"TapeDriveType", - "documentation":"

The value that indicates the type of tape drive to use for tape gateway. This field is optional.

Valid Values: \"IBM-ULT3580-TD5\"

" + "documentation":"

The value that indicates the type of tape drive to use for tape gateway. This field is optional.

Valid Values: IBM-ULT3580-TD5

" }, "MediumChangerType":{ "shape":"MediumChangerType", - "documentation":"

The value that indicates the type of medium changer to use for tape gateway. This field is optional.

Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

" + "documentation":"

The value that indicates the type of medium changer to use for tape gateway. This field is optional.

Valid Values: STK-L700 | AWS-Gateway-VTL

" }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags that you can assign to the gateway. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers that can be represented in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256 characters.

" } }, - "documentation":"

A JSON object containing one or more of the following fields:

" + "documentation":"

A JSON object containing one or more of the following fields:

" }, "ActivateGatewayOutput":{ "type":"structure", @@ -1144,7 +1186,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "DiskIds":{ "shape":"DiskIds", - "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" + "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" } } }, @@ -1192,7 +1234,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "DiskIds":{ "shape":"DiskIds", - "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" + "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" } } }, @@ -1212,7 +1254,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "DiskIds":{ "shape":"DiskIds", - "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" + "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" } }, "documentation":"

A JSON object containing one or more of the following fields:

" @@ -1222,7 +1264,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway for which working storage was configured.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway for which working storage was configured.

" }, "AssignTapePoolInput":{ "type":"structure", @@ -1237,7 +1279,7 @@ }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } } }, @@ -1272,7 +1314,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "DiskId":{ "shape":"DiskId", @@ -1301,10 +1343,59 @@ }, "Authentication":{ "type":"string", - "documentation":"

The authentication method of the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

", + "documentation":"

The authentication method of the file share. The default is ActiveDirectory.

Valid Values: ActiveDirectory | GuestAccess

", "max":15, "min":5 }, + "AutomaticTapeCreationPolicyInfo":{ + "type":"structure", + "members":{ + "AutomaticTapeCreationRules":{ + "shape":"AutomaticTapeCreationRules", + "documentation":"

An automatic tape creation policy consists of a list of automatic tape creation rules. This returns the rules that determine when and how to automatically create new tapes.

" + }, + "GatewayARN":{"shape":"GatewayARN"} + }, + "documentation":"

Information about the gateway's automatic tape creation policies, including the automatic tape creation rules and the gateway that is using the policies.

" + }, + "AutomaticTapeCreationPolicyInfos":{ + "type":"list", + "member":{"shape":"AutomaticTapeCreationPolicyInfo"} + }, + "AutomaticTapeCreationRule":{ + "type":"structure", + "required":[ + "TapeBarcodePrefix", + "PoolId", + "TapeSizeInBytes", + "MinimumNumTapes" + ], + "members":{ + "TapeBarcodePrefix":{ + "shape":"TapeBarcodePrefix", + "documentation":"

A prefix that you append to the barcode of the virtual tape that you are creating. This prefix makes the barcode unique.

The prefix must be 1-4 characters in length and must be one of the uppercase letters from A to Z.

" + }, + "PoolId":{ + "shape":"PoolId", + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the Amazon S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" + }, + "TapeSizeInBytes":{ + "shape":"TapeSize", + "documentation":"

The size, in bytes, of the virtual tape capacity.

" + }, + "MinimumNumTapes":{ + "shape":"MinimumNumTapes", + "documentation":"

The minimum number of available virtual tapes that the gateway maintains at all times. If the number of tapes on the gateway goes below this value, the gateway creates as many new tapes as are needed to have MinimumNumTapes on the gateway.

" + } + }, + "documentation":"

An automatic tape creation policy consists of automatic tape creation rules where each rule defines when and how to create new tapes.

" + }, + "AutomaticTapeCreationRules":{ + "type":"list", + "member":{"shape":"AutomaticTapeCreationRule"}, + "max":10, + "min":1 + }, "AvailabilityMonitorTestStatus":{ "type":"string", "enum":[ @@ -1327,6 +1418,17 @@ "min":51200 }, "Boolean":{"type":"boolean"}, + "CacheAttributes":{ + "type":"structure", + "members":{ + "CacheStaleTimeoutInSeconds":{ + "shape":"CacheStaleTimeoutInSeconds", + "documentation":"

Refreshes a file share's cache by using Time To Live (TTL). TTL is the length of time since the last refresh after which access to the directory would cause the file gateway to first refresh that directory's contents from the Amazon S3 bucket. The TTL duration is in seconds.

Valid Values: 300 to 2,592,000 seconds (5 minutes to 30 days)

" + } + }, + "documentation":"

Lists refresh cache information.

" + }, + "CacheStaleTimeoutInSeconds":{"type":"integer"}, "CachediSCSIVolume":{ "type":"structure", "members":{ @@ -1348,7 +1450,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

A value that indicates whether a storage volume is attached to or detached from a gateway. For more information, see Moving Your Volumes to a Different Gateway.

" + "documentation":"

A value that indicates whether a storage volume is attached to or detached from a gateway. For more information, see Moving your volumes to a different gateway.

" }, "VolumeSizeInBytes":{ "shape":"long", @@ -1436,6 +1538,13 @@ }, "documentation":"

CancelRetrievalOutput

" }, + "CaseSensitivity":{ + "type":"string", + "enum":[ + "ClientSpecified", + "CaseSensitive" + ] + }, "ChapCredentials":{ "type":"list", "member":{"shape":"ChapInfo"} @@ -1445,7 +1554,7 @@ "members":{ "TargetARN":{ "shape":"TargetARN", - "documentation":"

The Amazon Resource Name (ARN) of the volume.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The Amazon Resource Name (ARN) of the volume.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "SecretToAuthenticateInitiator":{ "shape":"ChapSecret", @@ -1494,7 +1603,7 @@ }, "SnapshotId":{ "shape":"SnapshotId", - "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot; otherwise, do not include this field. To list snapshots for your account, use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "TargetName":{ "shape":"TargetName", @@ -1506,7 +1615,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "ClientToken":{ "shape":"ClientToken", @@ -1514,11 +1623,11 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Tags":{ "shape":"Tags", @@ -1562,51 +1671,59 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", - "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" }, "LocationARN":{ "shape":"LocationARN", - "documentation":"

The ARN of the backed storage used for storing file data.

" + "documentation":"

The ARN of the backend storage used for storing file data. A prefix name can be added to the S3 bucket name. It must end with a \"/\".

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ClientList":{ "shape":"FileShareClientList", - "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

" + "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

" }, "Squash":{ "shape":"Squash", - "documentation":"

A value that maps a user to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

" + "documentation":"

A value that maps a user to anonymous user.

Valid values are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only; otherwise, set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing; otherwise, set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags that can be assigned to the NFS file share. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

CreateNFSFileShareInput

" @@ -1616,7 +1733,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" } }, "documentation":"

CreateNFSFileShareOutput

" @@ -1640,55 +1757,55 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Role":{ "shape":"Role", - "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" + "documentation":"

The ARN of the AWS Identity and Access Management (IAM) role that a file gateway assumes when it accesses the underlying storage.

" }, "LocationARN":{ "shape":"LocationARN", - "documentation":"

The ARN of the backed storage used for storing file data.

" + "documentation":"

The ARN of the backend storage used for storing file data. A prefix name can be added to the S3 bucket name. It must end with a \"/\".

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only; otherwise, set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing; otherwise, set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

Set this value to \"true to enable ACL (access control list) on the SMB file share. Set it to \"false\" to map file and directory permissions to the POSIX permissions.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

" + "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" + "documentation":"

A list of users or groups in the Active Directory that will be granted administrator privileges on the file share. These users can do all file operations as the super-user. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1.

Use this option very carefully, because any user in this list can do anything they like on the file share, regardless of file permissions.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "AuditDestinationARN":{ "shape":"AuditDestinationARN", @@ -1696,11 +1813,23 @@ }, "Authentication":{ "shape":"Authentication", - "documentation":"

The authentication method that users use to access the file share.

Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory.

" + "documentation":"

The authentication method that users use to access the file share. The default is ActiveDirectory.

Valid Values: ActiveDirectory | GuestAccess

" + }, + "CaseSensitivity":{ + "shape":"CaseSensitivity", + "documentation":"

The case of an object name in an Amazon S3 bucket. For ClientSpecified, the client determines the case sensitivity. For CaseSensitive, the gateway determines the case sensitivity. The default value is ClientSpecified.

" }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags that can be assigned to the NFS file share. Each tag is a key-value pair.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag's key is 128 characters, and the maximum length for a tag's value is 256.

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

CreateSMBFileShareInput

" @@ -1710,7 +1839,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the newly created file share.

" } }, "documentation":"

CreateSMBFileShareOutput

" @@ -1728,7 +1857,7 @@ }, "SnapshotDescription":{ "shape":"SnapshotDescription", - "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field

" + "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

" }, "Tags":{ "shape":"Tags", @@ -1766,7 +1895,7 @@ }, "SnapshotDescription":{ "shape":"SnapshotDescription", - "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field

" + "documentation":"

Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the Description field, and in the AWS Storage Gateway snapshot Details pane, Description field.

" }, "Tags":{ "shape":"Tags", @@ -1806,11 +1935,11 @@ }, "SnapshotId":{ "shape":"SnapshotId", - "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" + "documentation":"

The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new stored volume. Specify this field if you want to create the iSCSI storage volume from a snapshot; otherwise, do not include this field. To list snapshots for your account use DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

" }, "PreserveExistingData":{ "shape":"boolean", - "documentation":"

Specify this field as true if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.

Valid Values: true, false

" + "documentation":"

Set to true if you want to preserve the data on the local disk. Otherwise, set to false to create an empty volume.

Valid Values: true | false

" }, "TargetName":{ "shape":"TargetName", @@ -1818,15 +1947,15 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" + "documentation":"

The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

Valid Values: A valid IP address.

" }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "Tags":{ "shape":"Tags", @@ -1867,7 +1996,7 @@ }, "TapeSizeInBytes":{ "shape":"TapeSize", - "documentation":"

The size, in bytes, of the virtual tape that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 byte).

" + "documentation":"

The size, in bytes, of the virtual tape that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 bytes).

" }, "TapeBarcode":{ "shape":"TapeBarcode", @@ -1875,15 +2004,15 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS Key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "Tags":{ "shape":"Tags", @@ -1918,7 +2047,7 @@ }, "TapeSizeInBytes":{ "shape":"TapeSize", - "documentation":"

The size, in bytes, of the virtual tapes that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 byte).

" + "documentation":"

The size, in bytes, of the virtual tapes that you want to create.

The size must be aligned by gigabyte (1024*1024*1024 bytes).

" }, "ClientToken":{ "shape":"ClientToken", @@ -1934,15 +2063,15 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" }, "Tags":{ "shape":"Tags", @@ -1972,6 +2101,19 @@ "max":6, "min":0 }, + "DeleteAutomaticTapeCreationPolicyInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DeleteAutomaticTapeCreationPolicyOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "DeleteBandwidthRateLimitInput":{ "type":"structure", "required":[ @@ -1982,7 +2124,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "BandwidthType":{ "shape":"BandwidthType", - "documentation":"

One of the BandwidthType values that indicates the gateway bandwidth rate limit to delete.

Valid Values: Upload, Download, All.

" + "documentation":"

One of the BandwidthType values that indicates the gateway bandwidth rate limit to delete.

Valid Values: Upload | Download | All

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -1992,7 +2134,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway whose bandwidth rate information was deleted.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose bandwidth rate information was deleted.

" }, "DeleteChapCredentialsInput":{ "type":"structure", @@ -2032,11 +2174,11 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the file share to be deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the file share to be deleted.

" }, "ForceDelete":{ "shape":"boolean", - "documentation":"

If this value is set to true, the operation deletes a file share immediately and aborts all data uploads to AWS. Otherwise, the file share is not deleted until all data is uploaded to AWS. This process aborts the data upload process, and the file share enters the FORCE_DELETING status.

" + "documentation":"

If this value is set to true, the operation deletes a file share immediately and aborts all data uploads to AWS. Otherwise, the file share is not deleted until all data is uploaded to AWS. This process aborts the data upload process, and the file share enters the FORCE_DELETING status.

Valid Values: true | false

" } }, "documentation":"

DeleteFileShareInput

" @@ -2046,7 +2188,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the deleted file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the deleted file share.

" } }, "documentation":"

DeleteFileShareOutput

" @@ -2153,7 +2295,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the storage volume that was deleted. It is the same ARN you provided in the request.

" } }, - "documentation":"

A JSON object containing the of the storage volume that was deleted

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the storage volume that was deleted.

" }, "DescribeAvailabilityMonitorTestInput":{ "type":"structure", @@ -2182,7 +2324,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

" }, "DescribeBandwidthRateLimitOutput":{ "type":"structure", @@ -2212,11 +2354,11 @@ "GatewayARN":{"shape":"GatewayARN"}, "DiskIds":{ "shape":"DiskIds", - "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" + "documentation":"

An array of strings that identify disks that are to be configured as working storage. Each string has a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.

" }, "CacheAllocatedInBytes":{ "shape":"long", - "documentation":"

The amount of cache in bytes allocated to the a gateway.

" + "documentation":"

The amount of cache in bytes allocated to a gateway.

" }, "CacheUsedPercentage":{ "shape":"double", @@ -2242,7 +2384,7 @@ "members":{ "VolumeARNs":{ "shape":"VolumeARNs", - "documentation":"

An array of strings where each string represents the Amazon Resource Name (ARN) of a cached volume. All of the specified cached volumes must from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" + "documentation":"

An array of strings where each string represents the Amazon Resource Name (ARN) of a cached volume. All of the specified cached volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" } } }, @@ -2275,7 +2417,7 @@ "documentation":"

An array of ChapInfo objects that represent CHAP credentials. Each object in the array contains CHAP credential information for one target-initiator pair. If no CHAP credentials are set, an empty array is returned. CHAP credential information is provided in a JSON object with the following fields:

  • InitiatorName: The iSCSI initiator that connects to the target.

  • SecretToAuthenticateInitiator: The secret key that the initiator (for example, the Windows client) must provide to participate in mutual CHAP with the target.

  • SecretToAuthenticateTarget: The secret key that the target must provide to participate in mutual CHAP with the initiator (e.g. Windows client).

  • TargetARN: The Amazon Resource Name (ARN) of the storage volume.

" } }, - "documentation":"

A JSON object containing a .

" + "documentation":"

A JSON object containing the following fields:

" }, "DescribeGatewayInformationInput":{ "type":"structure", @@ -2335,7 +2477,7 @@ }, "VPCEndpoint":{ "shape":"string", - "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your gateway.

" + "documentation":"

The configuration settings for the virtual private cloud (VPC) endpoint for your gateway.

" }, "CloudWatchLogGroupARN":{ "shape":"CloudWatchLogGroupARN", @@ -2344,6 +2486,10 @@ "HostEnvironment":{ "shape":"HostEnvironment", "documentation":"

The type of hypervisor environment used by the host.

" + }, + "EndpointType":{ + "shape":"EndpointType", + "documentation":"

The type of endpoint for your gateway.

Valid Values: STANDARD | FIPS

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -2354,7 +2500,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

" }, "DescribeMaintenanceStartTimeOutput":{ "type":"structure", @@ -2374,7 +2520,7 @@ }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

This value is only available for tape and volume gateways.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" }, "Timezone":{ "shape":"GatewayTimezone", @@ -2389,7 +2535,7 @@ "members":{ "FileShareARNList":{ "shape":"FileShareARNList", - "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" + "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" } }, "documentation":"

DescribeNFSFileSharesInput

" @@ -2399,7 +2545,7 @@ "members":{ "NFSFileShareInfoList":{ "shape":"NFSFileShareInfoList", - "documentation":"

An array containing a description for each requested file share.

" + "documentation":"

An array containing a description for each requested file share.

" } }, "documentation":"

DescribeNFSFileSharesOutput

" @@ -2410,7 +2556,7 @@ "members":{ "FileShareARNList":{ "shape":"FileShareARNList", - "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" + "documentation":"

An array containing the Amazon Resource Name (ARN) of each file share to be described.

" } }, "documentation":"

DescribeSMBFileSharesInput

" @@ -2420,7 +2566,7 @@ "members":{ "SMBFileShareInfoList":{ "shape":"SMBFileShareInfoList", - "documentation":"

An array containing a description for each requested file share.

" + "documentation":"

An array containing a description for each requested file share.

" } }, "documentation":"

DescribeSMBFileSharesOutput

" @@ -2442,15 +2588,15 @@ }, "ActiveDirectoryStatus":{ "shape":"ActiveDirectoryStatus", - "documentation":"

Indicates the status of a gateway that is a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" + "documentation":"

Indicates the status of a gateway that is a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" }, "SMBGuestPasswordSet":{ "shape":"Boolean", - "documentation":"

This value is true if a password for the guest user “smbguest” is set, and otherwise false.

" + "documentation":"

This value is true if a password for the guest user smbguest is set, otherwise false.

Valid Values: true | false

" }, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

The type of security strategy that was specified for file gateway.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

The type of security strategy that was specified for file gateway.

  • ClientSpecified: If you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

  • MandatorySigning: If you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

  • MandatoryEncryption: If you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -2500,7 +2646,7 @@ "members":{ "VolumeARNs":{ "shape":"VolumeARNs", - "documentation":"

An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" + "documentation":"

An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.

" } }, "documentation":"

A JSON object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs.

" @@ -2510,7 +2656,7 @@ "members":{ "StorediSCSIVolumes":{ "shape":"StorediSCSIVolumes", - "documentation":"

Describes a single unit of output from DescribeStorediSCSIVolumes. The following fields are returned:

  • ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI target.

  • LunNumber: The logical disk number.

  • NetworkInterfaceId: The network interface ID of the stored volume that initiator use to map the stored volume as an iSCSI target.

  • NetworkInterfacePort: The port used to communicate with iSCSI targets.

  • PreservedExistingData: Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.

  • SourceSnapshotId: If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.

  • StorediSCSIVolumes: An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.

  • TargetARN: The Amazon Resource Name (ARN) of the volume target.

  • VolumeARN: The Amazon Resource Name (ARN) of the stored volume.

  • VolumeDiskId: The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.

  • VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.

  • VolumeiSCSIAttributes: An VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.

  • VolumeProgress: Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.

  • VolumeSizeInBytes: The size of the volume in bytes.

  • VolumeStatus: One of the VolumeStatus values that indicates the state of the volume.

  • VolumeType: One of the enumeration values describing the type of the volume. Currently, on STORED volumes are supported.

" + "documentation":"

Describes a single unit of output from DescribeStorediSCSIVolumes. The following fields are returned:

  • ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI target.

  • LunNumber: The logical disk number.

  • NetworkInterfaceId: The network interface ID of the stored volume that initiators use to map the stored volume as an iSCSI target.

  • NetworkInterfacePort: The port used to communicate with iSCSI targets.

  • PreservedExistingData: Indicates whether existing data on the underlying local disk was preserved when the stored volume was created.

  • SourceSnapshotId: If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.

  • StorediSCSIVolumes: An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.

  • TargetARN: The Amazon Resource Name (ARN) of the volume target.

  • VolumeARN: The Amazon Resource Name (ARN) of the stored volume.

  • VolumeDiskId: The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.

  • VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.

  • VolumeiSCSIAttributes: An VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.

  • VolumeProgress: Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.

  • VolumeSizeInBytes: The size of the volume in bytes.

  • VolumeStatus: One of the VolumeStatus values that indicates the state of the volume.

  • VolumeType: One of the enumeration values describing the type of the volume. Currently, only STORED volumes are supported.

" } } }, @@ -2527,7 +2673,7 @@ }, "Limit":{ "shape":"PositiveIntObject", - "documentation":"

Specifies that the number of virtual tapes descried be limited to the specified number.

" + "documentation":"

Specifies that the number of virtual tapes described be limited to the specified number.

" } }, "documentation":"

DescribeTapeArchivesInput

" @@ -2537,7 +2683,7 @@ "members":{ "TapeArchives":{ "shape":"TapeArchives", - "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes of the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description and tape barcode.

" + "documentation":"

An array of virtual tape objects in the virtual tape shelf (VTS). The description includes the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description, and tape barcode.

" }, "Marker":{ "shape":"Marker", @@ -2588,7 +2734,7 @@ }, "Marker":{ "shape":"Marker", - "documentation":"

A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

If not specified, the first page of results is retrieved.

" + "documentation":"

A marker value, obtained in a previous call to DescribeTapes. This marker indicates which page of results to retrieve.

If not specified, the first page of results is retrieved.

" }, "Limit":{ "shape":"PositiveIntObject", @@ -2662,7 +2808,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "VTLDevices":{ "shape":"VTLDevices", - "documentation":"

An array of VTL device objects composed of the Amazon Resource Name(ARN) of the VTL devices.

" + "documentation":"

An array of VTL device objects composed of the Amazon Resource Name (ARN) of the VTL devices.

" }, "Marker":{ "shape":"Marker", @@ -2677,7 +2823,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

" }, "DescribeWorkingStorageOutput":{ "type":"structure", @@ -2713,7 +2859,7 @@ }, "ForceDetach":{ "shape":"Boolean", - "documentation":"

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

" + "documentation":"

Set to true to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is false. If this value is set to false, you must manually disconnect the iSCSI connection from the target volume.

Valid Values: true | false

" } }, "documentation":"

AttachVolumeInput

" @@ -2799,7 +2945,7 @@ "DiskAllocationType":{"shape":"DiskAllocationType"}, "DiskAllocationResource":{ "shape":"string", - "documentation":"

The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" + "documentation":"

The iSCSI qualified name (IQN) that is defined for a disk. This field is not included in the response if the local disk is not defined as an iSCSI target. The format of this field is targetIqn::LUNNumber::region-volumeId.

" }, "DiskAttributeList":{"shape":"DiskAttributeList"} }, @@ -2807,7 +2953,7 @@ }, "DiskAllocationType":{ "type":"string", - "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used. Valid values: UPLOAD_BUFFER, CACHE_STORAGE

", + "documentation":"

One of the DiskAllocationType enumeration values that identifies how a local disk is used.

Valid Values: UPLOAD_BUFFER | CACHE_STORAGE

", "max":100, "min":3 }, @@ -2858,6 +3004,11 @@ "DoubleObject":{"type":"double"}, "Ec2InstanceId":{"type":"string"}, "Ec2InstanceRegion":{"type":"string"}, + "EndpointType":{ + "type":"string", + "max":8, + "min":4 + }, "ErrorCode":{ "type":"string", "enum":[ @@ -2927,7 +3078,7 @@ }, "FileShareARN":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the file share.

", + "documentation":"

The Amazon Resource Name (ARN) of the file share.

", "max":500, "min":50 }, @@ -2940,13 +3091,13 @@ "FileShareClientList":{ "type":"list", "member":{"shape":"IPV4AddressCIDR"}, - "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

", + "documentation":"

The list of clients that are allowed to access the file gateway. The list must contain either valid IP addresses or valid CIDR blocks.

", "max":100, "min":1 }, "FileShareId":{ "type":"string", - "documentation":"

The ID of the file share.

", + "documentation":"

The ID of the file share.

", "max":30, "min":12 }, @@ -2965,9 +3116,14 @@ "type":"list", "member":{"shape":"FileShareInfo"} }, + "FileShareName":{ + "type":"string", + "max":255, + "min":1 + }, "FileShareStatus":{ "type":"string", - "documentation":"

The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE and DELETING.

", + "documentation":"

The status of the file share.

Valid Values: CREATING | UPDATING | AVAILABLE | DELETING

", "max":50, "min":3 }, @@ -3029,7 +3185,7 @@ }, "GatewayOperationalState":{ "shape":"GatewayOperationalState", - "documentation":"

The state of the gateway.

Valid Values: DISABLED or ACTIVE

" + "documentation":"

The state of the gateway.

Valid Values: DISABLED | ACTIVE

" }, "GatewayName":{ "shape":"string", @@ -3083,6 +3239,8 @@ }, "Host":{ "type":"string", + "max":1024, + "min":6, "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(:(\\d+))?$" }, "HostEnvironment":{ @@ -3202,28 +3360,44 @@ }, "ActiveDirectoryStatus":{ "shape":"ActiveDirectoryStatus", - "documentation":"

Indicates the status of the gateway as a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" + "documentation":"

Indicates the status of the gateway as a member of the Active Directory domain.

  • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

  • DETACHED: Indicates that gateway is not joined to a domain.

  • JOINED: Indicates that the gateway has successfully joined a domain.

  • JOINING: Indicates that a JoinDomain operation is in progress.

  • NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or connectivity error.

  • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

  • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

" } }, "documentation":"

JoinDomainOutput

" }, "KMSKey":{ "type":"string", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

", + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

", "max":2048, - "min":7 + "min":7, + "pattern":"(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)" }, "LastSoftwareUpdate":{ "type":"string", "max":25, "min":1 }, + "ListAutomaticTapeCreationPoliciesInput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "ListAutomaticTapeCreationPoliciesOutput":{ + "type":"structure", + "members":{ + "AutomaticTapeCreationPolicyInfos":{ + "shape":"AutomaticTapeCreationPolicyInfos", + "documentation":"

Gets a listing of information about the gateway's automatic tape creation policies, including the automatic tape creation rules and the gateway that is using the policies.

" + } + } + }, "ListFileSharesInput":{ "type":"structure", "members":{ "GatewayARN":{ "shape":"GatewayARN", - "documentation":"

The Amazon resource Name (ARN) of the gateway whose file shares you want to list. If this field is not present, all file shares under your account are listed.

" + "documentation":"

The Amazon Resource Name (ARN) of the gateway whose file shares you want to list. If this field is not present, all file shares under your account are listed.

" }, "Limit":{ "shape":"PositiveIntObject", @@ -3241,15 +3415,15 @@ "members":{ "Marker":{ "shape":"Marker", - "documentation":"

If the request includes Marker, the response returns that value in this field.

" + "documentation":"

If the request includes Marker, the response returns that value in this field.

" }, "NextMarker":{ "shape":"Marker", - "documentation":"

If a value is present, there are more file shares to return. In a subsequent request, use NextMarker as the value for Marker to retrieve the next set of file shares.

" + "documentation":"

If a value is present, there are more file shares to return. In a subsequent request, use NextMarker as the value for Marker to retrieve the next set of file shares.

" }, "FileShareInfoList":{ "shape":"FileShareInfoList", - "documentation":"

An array of information about the file gateway's file shares.

" + "documentation":"

An array of information about the file gateway's file shares.

" } }, "documentation":"

ListFileShareOutput

" @@ -3287,7 +3461,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway.

" }, "ListLocalDisksOutput":{ "type":"structure", @@ -3439,8 +3613,8 @@ }, "LocationARN":{ "type":"string", - "documentation":"

The ARN of the backend storage used for storing file data.

", - "max":310, + "documentation":"

The ARN of the backend storage used for storing file data. A prefix name can be added to the S3 bucket name. It must end with a \"/\".

", + "max":1400, "min":16 }, "Marker":{ @@ -3453,6 +3627,11 @@ "max":50, "min":2 }, + "MinimumNumTapes":{ + "type":"integer", + "max":10, + "min":1 + }, "MinuteOfHour":{ "type":"integer", "max":59, @@ -3463,19 +3642,19 @@ "members":{ "FileMode":{ "shape":"PermissionMode", - "documentation":"

The Unix file mode in the form \"nnnn\". For example, \"0666\" represents the default file mode inside the file share. The default value is 0666.

" + "documentation":"

The Unix file mode in the form \"nnnn\". For example, 0666 represents the default file mode inside the file share. The default value is 0666.

" }, "DirectoryMode":{ "shape":"PermissionMode", - "documentation":"

The Unix directory mode in the form \"nnnn\". For example, \"0666\" represents the default access mode for all directories inside the file share. The default value is 0777.

" + "documentation":"

The Unix directory mode in the form \"nnnn\". For example, 0666 represents the default access mode for all directories inside the file share. The default value is 0777.

" }, "GroupId":{ "shape":"PermissionId", - "documentation":"

The default group ID for the file share (unless the files have another group ID specified). The default value is nfsnobody.

" + "documentation":"

The default group ID for the file share (unless the files have another group ID specified). The default value is nfsnobody.

" }, "OwnerId":{ "shape":"PermissionId", - "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" + "documentation":"

The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.

" } }, "documentation":"

Describes Network File System (NFS) file share default values. Files and folders stored as Amazon S3 objects in S3 buckets don't, by default, have Unix file permissions assigned to them. Upon discovery in an S3 bucket by Storage Gateway, the S3 objects that represent files and folders are assigned these default Unix permissions. This operation is only supported for file gateways.

" @@ -3490,7 +3669,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{"shape":"Path"}, @@ -3498,26 +3677,34 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{"shape":"ObjectACL"}, "ClientList":{"shape":"FileShareClientList"}, "Squash":{"shape":"Squash"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags assigned to the NFS file share, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ListTagsForResource API operation.

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

The Unix file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported in file gateways.

" @@ -3580,7 +3767,7 @@ }, "ObjectACL":{ "type":"string", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

", + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

", "enum":[ "private", "public-read", @@ -3598,7 +3785,7 @@ }, "Path":{ "type":"string", - "documentation":"

The file share path used by the NFS client to identify the mount point.

" + "documentation":"

The file share path used by the NFS client to identify the mount point.

" }, "PermissionId":{ "type":"long", @@ -3635,11 +3822,11 @@ }, "FolderList":{ "shape":"FolderList", - "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to \"true\", the entire S3 bucket that the file share has access to is refreshed.

" + "documentation":"

A comma-separated list of the paths of folders to refresh in the cache. The default is [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed.

" }, "Recursive":{ "shape":"Boolean", - "documentation":"

A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to \"true\", each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is \"true\".

" + "documentation":"

A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value is set to true, each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is true.

Valid Values: true | false

" } }, "documentation":"

RefreshCacheInput

" @@ -3670,7 +3857,7 @@ }, "TagKeys":{ "shape":"TagKeys", - "documentation":"

The keys of the tags you want to remove from the specified resource. A tag is composed of a key/value pair.

" + "documentation":"

The keys of the tags you want to remove from the specified resource. A tag is composed of a key-value pair.

" } }, "documentation":"

RemoveTagsFromResourceInput

" @@ -3758,9 +3945,10 @@ }, "Role":{ "type":"string", - "documentation":"

The ARN of the IAM role that file gateway assumes when it accesses the underlying storage.

", + "documentation":"

The ARN of the IAM role that file gateway assumes when it accesses the underlying storage.

", "max":2048, - "min":20 + "min":20, + "pattern":"^arn:(aws|aws-cn|aws-us-gov):iam::([0-9]+):role/(\\S+)$" }, "SMBFileShareInfo":{ "type":"structure", @@ -3771,7 +3959,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "KMSEncrypted":{ "shape":"boolean", - "documentation":"

True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{"shape":"KMSKey"}, "Path":{ @@ -3782,45 +3970,57 @@ "LocationARN":{"shape":"LocationARN"}, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{"shape":"ObjectACL"}, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

If this value is set to \"true\", indicates that ACL (access control list) is enabled on the SMB file share. If it is set to \"false\", it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html in the Storage Gateway User Guide.

" + "documentation":"

If this value is set to true, it indicates that access control list (ACL) is enabled on the SMB file share. If it is set to false, it indicates that file and directory permissions are mapped to the POSIX permission.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

" }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "AuditDestinationARN":{ "shape":"AuditDestinationARN", "documentation":"

The Amazon Resource Name (ARN) of the storage used for the audit logs.

" }, "Authentication":{"shape":"Authentication"}, + "CaseSensitivity":{ + "shape":"CaseSensitivity", + "documentation":"

The case of an object name in an Amazon S3 bucket. For ClientSpecified, the client determines the case sensitivity. For CaseSensitive, the gateway determines the case sensitivity. The default value is ClientSpecified.

" + }, "Tags":{ "shape":"Tags", "documentation":"

A list of up to 50 tags assigned to the SMB file share, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ListTagsForResource API operation.

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported for file gateways.

" @@ -3893,7 +4093,7 @@ }, "Password":{ "shape":"SMBGuestPassword", - "documentation":"

The password that you want to set for your SMB Server.

" + "documentation":"

The password that you want to set for your SMB server.

" } }, "documentation":"

SetSMBGuestPasswordInput

" @@ -3910,14 +4110,14 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway to shut down.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway to shut down.

" }, "ShutdownGatewayOutput":{ "type":"structure", "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway that was shut down.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was shut down.

" }, "SnapshotDescription":{ "type":"string", @@ -3930,7 +4130,7 @@ }, "Squash":{ "type":"string", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

", + "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

", "max":15, "min":5 }, @@ -3953,19 +4153,19 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway to start.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway to start.

" }, "StartGatewayOutput":{ "type":"structure", "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway that was restarted.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was restarted.

" }, "StorageClass":{ "type":"string", "documentation":"

", - "max":20, + "max":50, "min":5 }, "StorageGatewayError":{ @@ -3980,7 +4180,7 @@ "documentation":"

Human-readable text that provides detail about the error that occurred.

" } }, - "documentation":"

Provides additional information about an error that was returned by the service as an or. See the errorCode and errorDetails members for more information about the error.

" + "documentation":"

Provides additional information about an error that was returned by the service. See the errorCode and errorDetails members for more information about the error.

" }, "StorediSCSIVolume":{ "type":"structure", @@ -4003,7 +4203,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway. For more information, see Moving Your Volumes to a Different Gateway.

" + "documentation":"

A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway. For more information, see Moving your volumes to a different gateway.

" }, "VolumeSizeInBytes":{ "shape":"long", @@ -4023,7 +4223,7 @@ }, "PreservedExistingData":{ "shape":"boolean", - "documentation":"

Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.

Valid Values: true, false

" + "documentation":"

Indicates whether, when the stored volume was created, existing data on the underlying local disk was preserved.

Valid Values: true | false

" }, "VolumeiSCSIAttributes":{ "shape":"VolumeiSCSIAttributes", @@ -4058,14 +4258,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

Tag key (String). The key can't start with aws:.

" + "documentation":"

Tag key. The key can't start with aws:.

" }, "Value":{ "shape":"TagValue", "documentation":"

Value of the tag key.

" } }, - "documentation":"

A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /

" + "documentation":"

A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /.

" }, "TagKey":{ "type":"string", @@ -4123,7 +4323,7 @@ "KMSKey":{"shape":"KMSKey"}, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Describes a virtual tape object.

" @@ -4177,7 +4377,7 @@ "KMSKey":{"shape":"KMSKey"}, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Represents a virtual tape that is archived in the virtual tape shelf (VTS).

" @@ -4229,7 +4429,7 @@ }, "PoolId":{ "shape":"PoolId", - "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.

Valid values: \"GLACIER\", \"DEEP_ARCHIVE\"

" + "documentation":"

The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds to the pool.

Valid Values: GLACIER | DEEP_ARCHIVE

" } }, "documentation":"

Describes a virtual tape.

" @@ -4237,7 +4437,7 @@ "TapeInfos":{ "type":"list", "member":{"shape":"TapeInfo"}, - "documentation":"

An array of TapeInfo objects, where each object describes an a single tape. If there not tapes in the tape library or VTS, then the TapeInfos is an empty array.

" + "documentation":"

An array of TapeInfo objects, where each object describes a single tape. If there are no tapes in the tape library or VTS, then the TapeInfos is an empty array.

" }, "TapeRecoveryPointInfo":{ "type":"structure", @@ -4290,6 +4490,26 @@ "max":3600, "min":0 }, + "UpdateAutomaticTapeCreationPolicyInput":{ + "type":"structure", + "required":[ + "AutomaticTapeCreationRules", + "GatewayARN" + ], + "members":{ + "AutomaticTapeCreationRules":{ + "shape":"AutomaticTapeCreationRules", + "documentation":"

An automatic tape creation policy consists of a list of automatic tape creation rules. The rules determine when and how to automatically create new tapes.

" + }, + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "UpdateAutomaticTapeCreationPolicyOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "UpdateBandwidthRateLimitInput":{ "type":"structure", "required":["GatewayARN"], @@ -4311,7 +4531,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway whose throttle information was updated.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose throttle information was updated.

" }, "UpdateChapCredentialsInput":{ "type":"structure", @@ -4366,7 +4586,7 @@ }, "CloudWatchLogGroupARN":{ "shape":"CloudWatchLogGroupARN", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

For more information, see What Is Amazon CloudWatch Logs?.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

For more information, see What is Amazon CloudWatch Logs?.

" } } }, @@ -4379,7 +4599,7 @@ "documentation":"

The name you configured for your gateway.

" } }, - "documentation":"

A JSON object containing the ARN of the gateway that was updated.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was updated.

" }, "UpdateGatewaySoftwareNowInput":{ "type":"structure", @@ -4387,14 +4607,14 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway to update.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway to update.

" }, "UpdateGatewaySoftwareNowOutput":{ "type":"structure", "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway that was updated.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway that was updated.

" }, "UpdateMaintenanceStartTimeInput":{ "type":"structure", @@ -4419,7 +4639,7 @@ }, "DayOfMonth":{ "shape":"DayOfMonth", - "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

This value is only available for tape and volume gateways.

" + "documentation":"

The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month.

" } }, "documentation":"

A JSON object containing the following fields:

" @@ -4429,7 +4649,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"} }, - "documentation":"

A JSON object containing the of the gateway whose maintenance start time is updated.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the gateway whose maintenance start time is updated.

" }, "UpdateNFSFileShareInput":{ "type":"structure", @@ -4437,15 +4657,15 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the file share to be updated.

" + "documentation":"

The Amazon Resource Name (ARN) of the file share to be updated.

" }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "NFSFileShareDefaults":{ "shape":"NFSFileShareDefaults", @@ -4453,11 +4673,11 @@ }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ClientList":{ "shape":"FileShareClientList", @@ -4465,19 +4685,27 @@ }, "Squash":{ "shape":"Squash", - "documentation":"

The user mapped to anonymous user. Valid options are the following:

  • RootSquash - Only root is mapped to anonymous user.

  • NoSquash - No one is mapped to anonymous user

  • AllSquash - Everyone is mapped to anonymous user.

" + "documentation":"

The user mapped to anonymous user.

Valid values are the following:

  • RootSquash: Only root is mapped to anonymous user.

  • NoSquash: No one is mapped to anonymous user.

  • AllSquash: Everyone is mapped to anonymous user.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

UpdateNFSFileShareInput

" @@ -4487,7 +4715,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the updated file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the updated file share.

" } }, "documentation":"

UpdateNFSFileShareOutput

" @@ -4502,51 +4730,63 @@ }, "KMSEncrypted":{ "shape":"Boolean", - "documentation":"

True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

" + "documentation":"

Set to true to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.

Valid Values: true | false

" }, "KMSKey":{ "shape":"KMSKey", - "documentation":"

The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.

" + "documentation":"

The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric CMKs. This value can only be set when KMSEncrypted is true. Optional.

" }, "DefaultStorageClass":{ "shape":"StorageClass", - "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. If this field is not populated, the default value S3_STANDARD is used. Optional.

" + "documentation":"

The default storage class for objects put into an Amazon S3 bucket by the file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.

Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA

" }, "ObjectACL":{ "shape":"ObjectACL", - "documentation":"

A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is \"private\".

" + "documentation":"

A value that sets the access control list (ACL) permission for objects in the S3 bucket that a file gateway puts objects into. The default value is private.

" }, "ReadOnly":{ "shape":"Boolean", - "documentation":"

A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.

" + "documentation":"

A value that sets the write status of a file share. Set this value to true to set the write status to read-only, otherwise set to false.

Valid Values: true | false

" }, "GuessMIMETypeEnabled":{ "shape":"Boolean", - "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.

" + "documentation":"

A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, otherwise set to false. The default value is true.

Valid Values: true | false

" }, "RequesterPays":{ "shape":"Boolean", - "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

" + "documentation":"

A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.

RequesterPays is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.

Valid Values: true | false

" }, "SMBACLEnabled":{ "shape":"Boolean", - "documentation":"

Set this value to \"true to enable ACL (access control list) on the SMB file share. Set it to \"false\" to map file and directory permissions to the POSIX permissions.

For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.htmlin the Storage Gateway User Guide.

" + "documentation":"

Set this value to true to enable access control list (ACL) on the SMB file share. Set it to false to map file and directory permissions to the POSIX permissions.

For more information, see Using Microsoft Windows ACLs to control access to an SMB file share in the AWS Storage Gateway User Guide.

Valid Values: true | false

" }, "AdminUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that have administrator rights to the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "ValidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "InvalidUserList":{ "shape":"FileShareUserList", - "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example @group1. Can only be set if Authentication is set to ActiveDirectory.

" + "documentation":"

A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. Acceptable formats include: DOMAIN\\User1, user1, @group1, and @DOMAIN\\group1. Can only be set if Authentication is set to ActiveDirectory.

" }, "AuditDestinationARN":{ "shape":"AuditDestinationARN", "documentation":"

The Amazon Resource Name (ARN) of the storage used for the audit logs.

" + }, + "CaseSensitivity":{ + "shape":"CaseSensitivity", + "documentation":"

The case of an object name in an Amazon S3 bucket. For ClientSpecified, the client determines the case sensitivity. For CaseSensitive, the gateway determines the case sensitivity. The default value is ClientSpecified.

" + }, + "FileShareName":{ + "shape":"FileShareName", + "documentation":"

The name of the file share. Optional.

FileShareName must be set if an S3 prefix name is set in LocationARN.

" + }, + "CacheAttributes":{ + "shape":"CacheAttributes", + "documentation":"

Refresh cache information.

" } }, "documentation":"

UpdateSMBFileShareInput

" @@ -4556,7 +4796,7 @@ "members":{ "FileShareARN":{ "shape":"FileShareARN", - "documentation":"

The Amazon Resource Name (ARN) of the updated SMB file share.

" + "documentation":"

The Amazon Resource Name (ARN) of the updated SMB file share.

" } }, "documentation":"

UpdateSMBFileShareOutput

" @@ -4571,7 +4811,7 @@ "GatewayARN":{"shape":"GatewayARN"}, "SMBSecurityStrategy":{ "shape":"SMBSecurityStrategy", - "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" + "documentation":"

Specifies the type of security strategy.

ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment.

MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer.

" } } }, @@ -4620,7 +4860,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.

" } }, - "documentation":"

A JSON object containing the of the updated storage volume.

" + "documentation":"

A JSON object containing the Amazon Resource Name (ARN) of the updated storage volume.

" }, "UpdateVTLDeviceTypeInput":{ "type":"structure", @@ -4635,7 +4875,7 @@ }, "DeviceType":{ "shape":"DeviceType", - "documentation":"

The type of medium changer you want to select.

Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"

" + "documentation":"

The type of medium changer you want to select.

Valid Values: STK-L700 | AWS-Gateway-VTL

" } } }, @@ -4715,16 +4955,16 @@ "members":{ "VolumeARN":{ "shape":"VolumeARN", - "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:

arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "VolumeId":{ "shape":"VolumeId", - "documentation":"

The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "GatewayARN":{"shape":"GatewayARN"}, "GatewayId":{ "shape":"GatewayId", - "documentation":"

The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" + "documentation":"

The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.

Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).

" }, "VolumeType":{ "shape":"VolumeType", @@ -4736,7 +4976,7 @@ }, "VolumeAttachmentStatus":{ "shape":"VolumeAttachmentStatus", - "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" + "documentation":"

One of the VolumeStatus values that indicates the state of the storage volume.

" } }, "documentation":"

Describes a storage volume object.

" @@ -4819,5 +5059,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS cloud for cost effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS Resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" + "documentation":"AWS Storage Gateway Service

AWS Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the AWS storage infrastructure. The service enables you to securely upload data to the AWS Cloud for cost-effective backup and rapid disaster recovery.

Use the following links to get started using the AWS Storage Gateway Service API Reference:

AWS Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected.

IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs.

For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:

arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.

A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee.

For more information, see Announcement: Heads-up – Longer AWS Storage Gateway volume and snapshot IDs coming in 2016.

" } diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 66c1e2d8fc3e..dffec6107af2 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/main/resources/codegen-resources/service-2.json b/services/sts/src/main/resources/codegen-resources/service-2.json index 7e8282c38a55..996bdd1b1262 100644 --- a/services/sts/src/main/resources/codegen-resources/service-2.json +++ b/services/sts/src/main/resources/codegen-resources/service-2.json @@ -676,7 +676,8 @@ "SAMLAssertionType":{ "type":"string", "max":100000, - "min":4 + "min":4, + "sensitive":true }, "Subject":{"type":"string"}, "SubjectType":{"type":"string"}, @@ -721,7 +722,8 @@ "clientTokenType":{ "type":"string", "max":2048, - "min":4 + "min":4, + "sensitive":true }, "dateType":{"type":"timestamp"}, "decodedMessageType":{"type":"string"}, diff --git a/services/support/pom.xml b/services/support/pom.xml index 62db712b28b6..2f4d13cce16c 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/support/src/main/resources/codegen-resources/service-2.json b/services/support/src/main/resources/codegen-resources/service-2.json index 5787cdd60953..00952e6e346d 100644 --- a/services/support/src/main/resources/codegen-resources/service-2.json +++ b/services/support/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,7 @@ {"shape":"AttachmentSetSizeLimitExceeded"}, {"shape":"AttachmentLimitExceeded"} ], - "documentation":"

Adds one or more attachments to an attachment set. If an attachmentSetId is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an attachmentSetId is specified, the attachments are added to the specified set, if it exists.

An attachment set is a temporary container for attachments that are to be added to a case or case communication. The set is available for one hour after it is created; the expiryTime returned in the response indicates when the set expires. The maximum number of attachments in a set is 3, and the maximum size of any attachment in the set is 5 MB.

" + "documentation":"

Adds one or more attachments to an attachment set.

An attachment set is a temporary container for attachments that you add to a case or case communication. The set is available for 1 hour after it's created. The expiryTime returned in the response is when the set expires.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "AddCommunicationToCase":{ "name":"AddCommunicationToCase", @@ -43,7 +43,7 @@ {"shape":"AttachmentSetIdNotFound"}, {"shape":"AttachmentSetExpired"} ], - "documentation":"

Adds additional customer communication to an AWS Support case. You use the caseId value to identify the case to add communication to. You can list a set of email addresses to copy on the communication using the ccEmailAddresses value. The communicationBody value contains the text of the communication.

The response indicates the success or failure of the request.

This operation implements a subset of the features of the AWS Support Center.

" + "documentation":"

Adds additional customer communication to an AWS Support case. Use the caseId parameter to identify the case to which to add communication. You can list a set of email addresses to copy on the communication by using the ccEmailAddresses parameter. The communicationBody value contains the text of the communication.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "CreateCase":{ "name":"CreateCase", @@ -59,7 +59,7 @@ {"shape":"AttachmentSetIdNotFound"}, {"shape":"AttachmentSetExpired"} ], - "documentation":"

Creates a new case in the AWS Support Center. This operation is modeled on the behavior of the AWS Support Center Create Case page. Its parameters require you to specify the following information:

  • issueType. The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"

    Service limit increases are not supported by the Support API; you must submit service limit increase requests in Support Center.

    The caseId is not the displayId that appears in Support Center. You can use the DescribeCases API to get the displayId.

  • serviceCode. The code for an AWS service. You can get the possible serviceCode values by calling DescribeServices.

  • categoryCode. The category for the service defined for the serviceCode value. You also get the category code for a service by calling DescribeServices. Each AWS service defines its own set of category codes.

  • severityCode. A value that indicates the urgency of the case, which in turn determines the response time according to your service level agreement with AWS Support. You can get the possible severityCode values by calling DescribeSeverityLevels. For more information about the meaning of the codes, see SeverityLevel and Choosing a Severity.

  • subject. The Subject field on the AWS Support Center Create Case page.

  • communicationBody. The Description field on the AWS Support Center Create Case page.

  • attachmentSetId. The ID of a set of attachments that has been created by using AddAttachmentsToSet.

  • language. The human language in which AWS Support handles the case. English and Japanese are currently supported.

  • ccEmailAddresses. The AWS Support Center CC field on the Create Case page. You can list email addresses to be copied on any correspondence about the case. The account that opens the case is already identified by passing the AWS Credentials in the HTTP POST method or in a method or function call from one of the programming languages supported by an AWS SDK.

To add additional communication or attachments to an existing case, use AddCommunicationToCase.

A successful CreateCase request returns an AWS Support case number. Case numbers are used by the DescribeCases operation to retrieve existing AWS Support cases.

" + "documentation":"

Creates a case in the AWS Support Center. This operation is similar to how you create a case in the AWS Support Center Create Case page.

The AWS Support API doesn't support requesting service limit increases. You can submit a service limit increase in the following ways:

A successful CreateCase request returns an AWS Support case number. You can use the DescribeCases operation and specify the case number to get existing AWS Support cases. After you create a case, use the AddCommunicationToCase operation to add additional communication or attachments to an existing case.

The caseId is separate from the displayId that appears in the AWS Support Center. Use the DescribeCases operation to get the displayId.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeAttachment":{ "name":"DescribeAttachment", @@ -74,7 +74,7 @@ {"shape":"DescribeAttachmentLimitExceeded"}, {"shape":"AttachmentIdNotFound"} ], - "documentation":"

Returns the attachment that has the specified ID. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.

" + "documentation":"

Returns the attachment that has the specified ID. Attachments can include screenshots, error logs, or other files that describe your issue. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeCases":{ "name":"DescribeCases", @@ -88,7 +88,7 @@ {"shape":"InternalServerError"}, {"shape":"CaseIdNotFound"} ], - "documentation":"

Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the afterTime and beforeTime request parameters. You can set values for the includeResolvedCases and includeCommunications request parameters to control how much information is returned.

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

The response returns the following in JSON format:

  • One or more CaseDetails data types.

  • One or more nextToken values, which specify where to paginate the returned records represented by the CaseDetails objects.

" + "documentation":"

Returns a list of cases that you specify by passing one or more case IDs. You can use the afterTime and beforeTime parameters to filter the cases by date. You can set values for the includeResolvedCases and includeCommunications parameters to specify how much information to return.

The response returns the following in JSON format:

  • One or more CaseDetails data types.

  • One or more nextToken values, which specify where to paginate the returned records represented by the CaseDetails objects.

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request might return an error.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeCommunications":{ "name":"DescribeCommunications", @@ -102,7 +102,7 @@ {"shape":"InternalServerError"}, {"shape":"CaseIdNotFound"} ], - "documentation":"

Returns communications (and attachments) for one or more support cases. You can use the afterTime and beforeTime parameters to filter by date. You can use the caseId parameter to restrict the results to a particular case.

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

You can use the maxResults and nextToken parameters to control the pagination of the result set. Set maxResults to the number of cases you want displayed on each page, and use nextToken to specify the resumption of pagination.

" + "documentation":"

Returns communications and attachments for one or more support cases. Use the afterTime and beforeTime parameters to filter by date. You can use the caseId parameter to restrict the results to a specific case.

Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.

You can use the maxResults and nextToken parameters to control the pagination of the results. Set maxResults to the number of cases that you want to display on each page, and use nextToken to specify the resumption of pagination.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeServices":{ "name":"DescribeServices", @@ -115,7 +115,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the current list of AWS services and a list of service categories that applies to each one. You then use service names and categories in your CreateCase requests. Each AWS service has its own set of categories.

The service codes and category codes correspond to the values that are displayed in the Service and Category drop-down lists on the AWS Support Center Create Case page. The values in those fields, however, do not necessarily match the service codes and categories returned by the DescribeServices request. Always use the service codes and categories obtained programmatically. This practice ensures that you always have the most recent set of service and category codes.

" + "documentation":"

Returns the current list of AWS services and a list of service categories for each service. You then use service names and categories in your CreateCase requests. Each AWS service has its own set of categories.

The service codes and category codes correspond to the values that appear in the Service and Category lists on the AWS Support Center Create Case page. The values in those fields don't necessarily match the service codes and categories returned by the DescribeServices operation. Always use the service codes and categories that the DescribeServices operation returns, so that you have the most recent set of service and category codes.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeSeverityLevels":{ "name":"DescribeSeverityLevels", @@ -128,7 +128,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the list of severity levels that you can assign to an AWS Support case. The severity level for a case is also a field in the CaseDetails data type included in any CreateCase request.

" + "documentation":"

Returns the list of severity levels that you can assign to an AWS Support case. The severity level for a case is also a field in the CaseDetails data type that you include for a CreateCase request.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeTrustedAdvisorCheckRefreshStatuses":{ "name":"DescribeTrustedAdvisorCheckRefreshStatuses", @@ -141,7 +141,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the refresh status of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

Some checks are refreshed automatically, and their refresh statuses cannot be retrieved by using this operation. Use of the DescribeTrustedAdvisorCheckRefreshStatuses operation for these checks causes an InvalidParameterValue error.

" + "documentation":"

Returns the refresh status of the AWS Trusted Advisor checks that have the specified check IDs. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

Some checks are refreshed automatically, and you can't return their refresh statuses by using the DescribeTrustedAdvisorCheckRefreshStatuses operation. If you call this operation for these checks, you might see an InvalidParameterValue error.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeTrustedAdvisorCheckResult":{ "name":"DescribeTrustedAdvisorCheckResult", @@ -154,7 +154,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the results of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

The response contains a TrustedAdvisorCheckResult object, which contains these three objects:

In addition, the response contains these fields:

  • status. The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

  • timestamp. The time of the last refresh of the check.

  • checkId. The unique identifier for the check.

" + "documentation":"

Returns the results of the AWS Trusted Advisor check that has the specified check ID. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

The response contains a TrustedAdvisorCheckResult object, which contains these three objects:

In addition, the response contains these fields:

  • status - The alert status of the check: \"ok\" (green), \"warning\" (yellow), \"error\" (red), or \"not_available\".

  • timestamp - The time of the last refresh of the check.

  • checkId - The unique identifier for the check.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeTrustedAdvisorCheckSummaries":{ "name":"DescribeTrustedAdvisorCheckSummaries", @@ -167,7 +167,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns the summaries of the results of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

The response contains an array of TrustedAdvisorCheckSummary objects.

" + "documentation":"

Returns the results for the AWS Trusted Advisor check summaries for the check IDs that you specified. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

The response contains an array of TrustedAdvisorCheckSummary objects.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "DescribeTrustedAdvisorChecks":{ "name":"DescribeTrustedAdvisorChecks", @@ -180,7 +180,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Returns information about all available Trusted Advisor checks, including name, ID, category, description, and metadata. You must specify a language code; English (\"en\") and Japanese (\"ja\") are currently supported. The response contains a TrustedAdvisorCheckDescription for each check. The region must be set to us-east-1.

" + "documentation":"

Returns information about all available AWS Trusted Advisor checks, including the name, ID, category, description, and metadata. You must specify a language code. The AWS Support API currently supports English (\"en\") and Japanese (\"ja\"). The response contains a TrustedAdvisorCheckDescription object for each check. You must set the AWS Region to us-east-1.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "RefreshTrustedAdvisorCheck":{ "name":"RefreshTrustedAdvisorCheck", @@ -193,7 +193,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Requests a refresh of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks.

Some checks are refreshed automatically, and they cannot be refreshed by using this operation. Use of the RefreshTrustedAdvisorCheck operation for these checks causes an InvalidParameterValue error.

The response contains a TrustedAdvisorCheckRefreshStatus object, which contains these fields:

  • status. The refresh status of the check:

    • none: The check is not refreshed or the non-success status exceeds the timeout

    • enqueued: The check refresh requests has entered the refresh queue

    • processing: The check refresh request is picked up by the rule processing engine

    • success: The check is successfully refreshed

    • abandoned: The check refresh has failed

  • millisUntilNextRefreshable. The amount of time, in milliseconds, until the check is eligible for refresh.

  • checkId. The unique identifier for the check.

" + "documentation":"

Refreshes the AWS Trusted Advisor check that you specify using the check ID. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.

Some checks are refreshed automatically. If you call the RefreshTrustedAdvisorCheck operation to refresh them, you might see the InvalidParameterValue error.

The response contains a TrustedAdvisorCheckRefreshStatus object.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" }, "ResolveCase":{ "name":"ResolveCase", @@ -207,7 +207,7 @@ {"shape":"InternalServerError"}, {"shape":"CaseIdNotFound"} ], - "documentation":"

Takes a caseId and returns the initial state of the case along with the state of the case after the call to ResolveCase completed.

" + "documentation":"

Resolves a support case. This operation takes a caseId and returns the initial and final state of the case.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

" } }, "shapes":{ @@ -221,10 +221,9 @@ }, "attachments":{ "shape":"Attachments", - "documentation":"

One or more attachments to add to the set. The limit is 3 attachments per set, and the size limit is 5 MB per attachment.

" + "documentation":"

One or more attachments to add to the set. You can add up to three attachments per set. The size limit is 5 MB per attachment.

In the Attachment object, use the data parameter to specify the contents of the attachment file. In the previous request syntax, the value for data appear as blob, which is represented as a base64-encoded string. The value for fileName is the name of the attachment, such as troubleshoot-screenshot.png.

" } - }, - "documentation":"

" + } }, "AddAttachmentsToSetResponse":{ "type":"structure", @@ -260,8 +259,7 @@ "shape":"AttachmentSetId", "documentation":"

The ID of a set of one or more attachments for the communication to add to the case. Create the set by calling AddAttachmentsToSet

" } - }, - "documentation":"

To be written.

" + } }, "AddCommunicationToCaseResponse":{ "type":"structure", @@ -334,7 +332,7 @@ "members":{ "message":{ "shape":"ErrorMessage", - "documentation":"

The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

" + "documentation":"

The expiration time of the attachment set has passed. The set expires one hour after it is created.

" } }, "documentation":"

The expiration time of the attachment set has passed. The set expires 1 hour after it is created.

", @@ -357,10 +355,10 @@ "members":{ "message":{ "shape":"ErrorMessage", - "documentation":"

A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

" + "documentation":"

A limit for the size of an attachment set has been exceeded. The limits are three attachments and 5 MB per attachment.

" } }, - "documentation":"

A limit for the size of an attachment set has been exceeded. The limits are 3 attachments and 5 MB per attachment.

", + "documentation":"

A limit for the size of an attachment set has been exceeded. The limits are three attachments and 5 MB per attachment.

", "exception":true }, "Attachments":{ @@ -397,7 +395,7 @@ }, "status":{ "shape":"Status", - "documentation":"

The status of the case. Valid values: resolved | pending-customer-action | opened | unassigned | work-in-progress.

" + "documentation":"

The status of the case.

Valid values:

  • opened

  • pending-customer-action

  • reopened

  • resolved

  • unassigned

  • work-in-progress

" }, "serviceCode":{ "shape":"ServiceCode", @@ -417,7 +415,7 @@ }, "timeCreated":{ "shape":"TimeCreated", - "documentation":"

The time that the case was case created in the AWS Support Center.

" + "documentation":"

The time that the case was created in the AWS Support Center.

" }, "recentCommunications":{ "shape":"RecentCaseCommunications", @@ -432,7 +430,7 @@ "documentation":"

The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

" } }, - "documentation":"

A JSON-formatted object that contains the metadata for a support case. It is contained the response from a DescribeCases request. CaseDetails contains the following fields:

  • caseId. The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.

  • categoryCode. The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.

  • displayId. The identifier for the case on pages in the AWS Support Center.

  • language. The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

  • recentCommunications. One or more Communication objects. Fields of these objects are attachments, body, caseId, submittedBy, and timeCreated.

  • nextToken. A resumption point for pagination.

  • serviceCode. The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices.

  • severityCode. The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are: low, normal, high, urgent, and critical.

  • status. The status of the case in the AWS Support Center. The possible values are: resolved, pending-customer-action, opened, unassigned, and work-in-progress.

  • subject. The subject line of the case.

  • submittedBy. The email address of the account that submitted the case.

  • timeCreated. The time the case was created, in ISO-8601 format.

" + "documentation":"

A JSON-formatted object that contains the metadata for a support case. It is contained in the response from a DescribeCases request. CaseDetails contains the following fields:

  • caseId. The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47.

  • categoryCode. The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices.

  • displayId. The identifier for the case on pages in the AWS Support Center.

  • language. The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

  • nextToken. A resumption point for pagination.

  • recentCommunications. One or more Communication objects. Fields of these objects are attachments, body, caseId, submittedBy, and timeCreated.

  • serviceCode. The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices.

  • severityCode. The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are: low, normal, high, urgent, and critical.

  • status. The status of the case in the AWS Support Center. Valid values:

    • opened

    • pending-customer-action

    • reopened

    • resolved

    • unassigned

    • work-in-progress

  • subject. The subject line of the case.

  • submittedBy. The email address of the account that submitted the case.

  • timeCreated. The time the case was created, in ISO-8601 format.

" }, "CaseId":{"type":"string"}, "CaseIdList":{ @@ -528,39 +526,39 @@ "members":{ "subject":{ "shape":"Subject", - "documentation":"

The title of the AWS Support case.

" + "documentation":"

The title of the AWS Support case. The title appears in the Subject field on the AWS Support Center Create Case page.

" }, "serviceCode":{ "shape":"ServiceCode", - "documentation":"

The code for the AWS service returned by the call to DescribeServices.

" + "documentation":"

The code for the AWS service. You can use the DescribeServices operation to get the possible serviceCode values.

" }, "severityCode":{ "shape":"SeverityCode", - "documentation":"

The code for the severity level returned by the call to DescribeSeverityLevels.

The availability of severity levels depends on the support plan for the account.

" + "documentation":"

A value that indicates the urgency of the case. This value determines the response time according to your service level agreement with AWS Support. You can use the DescribeSeverityLevels operation to get the possible values for severityCode.

For more information, see SeverityLevel and Choosing a Severity in the AWS Support User Guide.

The availability of severity levels depends on the support plan for the AWS account.

" }, "categoryCode":{ "shape":"CategoryCode", - "documentation":"

The category of problem for the AWS Support case.

" + "documentation":"

The category of problem for the AWS Support case. You also use the DescribeServices operation to get the category code for a service. Each AWS service defines its own set of category codes.

" }, "communicationBody":{ "shape":"CommunicationBody", - "documentation":"

The communication body text when you create an AWS Support case by calling CreateCase.

" + "documentation":"

The communication body text that describes the issue. This text appears in the Description field on the AWS Support Center Create Case page.

" }, "ccEmailAddresses":{ "shape":"CcEmailAddressList", - "documentation":"

A list of email addresses that AWS Support copies on case correspondence.

" + "documentation":"

A list of email addresses that AWS Support copies on case correspondence. AWS Support identifies the account that creates the case when you specify your AWS credentials in an HTTP POST method or use the AWS SDKs.

" }, "language":{ "shape":"Language", - "documentation":"

The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

" + "documentation":"

The language in which AWS Support handles the case. You must specify the ISO 639-1 code for the language parameter if you want support in that language. Currently, English (\"en\") and Japanese (\"ja\") are supported.

" }, "issueType":{ "shape":"IssueType", - "documentation":"

The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"

Service limit increases are not supported by the Support API; you must submit service limit increase requests in Support Center.

" + "documentation":"

The type of issue for the case. You can specify customer-service or technical. If you don't specify a value, the default is technical.

" }, "attachmentSetId":{ "shape":"AttachmentSetId", - "documentation":"

The ID of a set of one or more attachments for the case. Create the set by using AddAttachmentsToSet.

" + "documentation":"

The ID of a set of one or more attachments for the case. Create the set by using the AddAttachmentsToSet operation.

" } } }, @@ -569,7 +567,7 @@ "members":{ "caseId":{ "shape":"CaseId", - "documentation":"

The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-12345678910-2013-c4c1d2bf33c5cf47

" + "documentation":"

The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string in the following format: case-12345678910-2013-c4c1d2bf33c5cf47

" } }, "documentation":"

The AWS Support case ID returned by a successful completion of the CreateCase operation.

" @@ -601,7 +599,7 @@ "members":{ "attachment":{ "shape":"Attachment", - "documentation":"

The attachment content and file name.

" + "documentation":"

This object includes the attachment content and file name.

In the previous response syntax, the value for the data parameter appears as blob, which is represented as a base64-encoded string. The value for fileName is the name of the attachment, such as troubleshoot-screenshot.png.

" } }, "documentation":"

The content and file name of the attachment returned by the DescribeAttachment operation.

" @@ -627,7 +625,7 @@ }, "includeResolvedCases":{ "shape":"IncludeResolvedCases", - "documentation":"

Specifies whether resolved support cases should be included in the DescribeCases results. The default is false.

" + "documentation":"

Specifies whether to include resolved support cases in the DescribeCases response. By default, resolved cases aren't included.

" }, "nextToken":{ "shape":"NextToken", @@ -643,7 +641,7 @@ }, "includeCommunications":{ "shape":"IncludeCommunications", - "documentation":"

Specifies whether communications should be included in the DescribeCases results. The default is true.

" + "documentation":"

Specifies whether to include communications in the DescribeCases response. By default, communications are included.

" } } }, @@ -659,7 +657,7 @@ "documentation":"

A resumption point for pagination.

" } }, - "documentation":"

Returns an array of CaseDetails objects and a nextToken that defines a point for pagination in the result set.

" + "documentation":"

Returns an array of CaseDetails objects and a nextToken that defines a point for pagination in the result set.

" }, "DescribeCommunicationsRequest":{ "type":"structure", @@ -749,10 +747,9 @@ "members":{ "checkIds":{ "shape":"StringList", - "documentation":"

The IDs of the Trusted Advisor checks to get the status of. Note: Specifying the check ID of a check that is automatically refreshed causes an InvalidParameterValue error.

" + "documentation":"

The IDs of the Trusted Advisor checks to get the status of.

If you specify the check ID of a check that is automatically refreshed, you might see an InvalidParameterValue error.

" } - }, - "documentation":"

" + } }, "DescribeTrustedAdvisorCheckRefreshStatusesResponse":{ "type":"structure", @@ -819,8 +816,7 @@ "shape":"String", "documentation":"

The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.

" } - }, - "documentation":"

" + } }, "DescribeTrustedAdvisorChecksResponse":{ "type":"structure", @@ -961,10 +957,10 @@ }, "name":{ "shape":"SeverityLevelName", - "documentation":"

The name of the severity level that corresponds to the severity level code.

The values returned by the API differ from the values that are displayed in the AWS Support Center. For example, for the code \"low\", the API name is \"Low\", but the name in the Support Center is \"General guidance\". These are the Support Center code/name mappings:

  • low: General guidance

  • normal: System impaired

  • high: Production system impaired

  • urgent: Production system down

  • critical: Business-critical system down

For more information, see Choosing a Severity

" + "documentation":"

The name of the severity level that corresponds to the severity level code.

The values returned by the API differ from the values that are displayed in the AWS Support Center. For example, for the code \"low\", the API name is \"Low\", but the name in the Support Center is \"General guidance\". These are the Support Center code/name mappings:

  • low: General guidance

  • normal: System impaired

  • high: Production system impaired

  • urgent: Production system down

  • critical: Business-critical system down

For more information, see Choosing a severity in the AWS Support User Guide.

" } }, - "documentation":"

A code and name pair that represents the severity level of a support case. The available values depend on the support plan for the account. For more information, see Choosing a Severity.

" + "documentation":"

A code and name pair that represents the severity level of a support case. The available values depend on the support plan for the account. For more information, see Choosing a severity in the AWS Support User Guide.

" }, "SeverityLevelCode":{"type":"string"}, "SeverityLevelName":{"type":"string"}, @@ -1011,7 +1007,7 @@ }, "description":{ "shape":"String", - "documentation":"

The description of the Trusted Advisor check, which includes the alert criteria and recommended actions (contains HTML markup).

" + "documentation":"

The description of the Trusted Advisor check, which includes the alert criteria and recommended operations (contains HTML markup).

" }, "category":{ "shape":"String", @@ -1137,14 +1133,14 @@ "members":{ "estimatedMonthlySavings":{ "shape":"Double", - "documentation":"

The estimated monthly savings that might be realized if the recommended actions are taken.

" + "documentation":"

The estimated monthly savings that might be realized if the recommended operations are taken.

" }, "estimatedPercentMonthlySavings":{ "shape":"Double", - "documentation":"

The estimated percentage of savings that might be realized if the recommended actions are taken.

" + "documentation":"

The estimated percentage of savings that might be realized if the recommended operations are taken.

" } }, - "documentation":"

The estimated cost savings that might be realized if the recommended actions are taken.

" + "documentation":"

The estimated cost savings that might be realized if the recommended operations are taken.

" }, "TrustedAdvisorResourceDetail":{ "type":"structure", @@ -1210,5 +1206,5 @@ "documentation":"

Details about AWS resources that were analyzed in a call to Trusted Advisor DescribeTrustedAdvisorCheckSummaries.

" } }, - "documentation":"AWS Support

The AWS Support API reference is intended for programmers who need detailed information about the AWS Support operations and data types. This service enables you to manage your AWS Support cases programmatically. It uses HTTP methods that return results in JSON format.

The AWS Support service also exposes a set of Trusted Advisor features. You can retrieve a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

The following list describes the AWS Support case management operations:

The following list describes the operations available from the AWS Support service for Trusted Advisor:

For authentication of requests, AWS Support uses Signature Version 4 Signing Process.

See About the AWS Support API in the AWS Support User Guide for information about how to use this service to create and manage your support cases, and how to call Trusted Advisor for results of checks on your resources.

" + "documentation":"AWS Support

The AWS Support API reference is intended for programmers who need detailed information about the AWS Support operations and data types. This service enables you to manage your AWS Support cases programmatically. It uses HTTP methods that return results in JSON format.

  • You must have a Business or Enterprise support plan to use the AWS Support API.

  • If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.

The AWS Support service also exposes a set of AWS Trusted Advisor features. You can retrieve a list of checks and their descriptions, get check results, specify checks to refresh, and get the refresh status of checks.

The following list describes the AWS Support case management operations:

The following list describes the operations available from the AWS Support service for Trusted Advisor:

For authentication of requests, AWS Support uses Signature Version 4 Signing Process.

See About the AWS Support API in the AWS Support User Guide for information about how to use this service to create and manage your support cases, and how to call Trusted Advisor for results of checks on your resources.

" } diff --git a/services/swf/pom.xml b/services/swf/pom.xml index ff76793bb2ee..248a13988042 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml new file mode 100644 index 000000000000..7dd4b40b4090 --- /dev/null +++ b/services/synthetics/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.13.56-SNAPSHOT + + synthetics + AWS Java SDK :: Services :: Synthetics + The AWS Java SDK for Synthetics module holds the client classes that are used for + communicating with Synthetics. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.synthetics + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/synthetics/src/main/resources/codegen-resources/paginators-1.json b/services/synthetics/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..e5412aa47fd0 --- /dev/null +++ b/services/synthetics/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,24 @@ +{ + "pagination": { + "DescribeCanaries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "DescribeCanariesLastRun": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "DescribeRuntimeVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "GetCanaryRuns": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + } + } +} \ No newline at end of file diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json 
b/services/synthetics/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..f4207b694492 --- /dev/null +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1084 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-10-11", + "endpointPrefix":"synthetics", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Synthetics", + "serviceFullName":"Synthetics", + "serviceId":"synthetics", + "signatureVersion":"v4", + "signingName":"synthetics", + "uid":"synthetics-2017-10-11" + }, + "operations":{ + "CreateCanary":{ + "name":"CreateCanary", + "http":{ + "method":"POST", + "requestUri":"/canary" + }, + "input":{"shape":"CreateCanaryRequest"}, + "output":{"shape":"CreateCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a canary. Canaries are scripts that monitor your endpoints and APIs from the outside-in. Canaries help you check the availability and latency of your web services and troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and metrics. You can set up a canary to run continuously or just once.

Do not use CreateCanary to modify an existing canary. Use UpdateCanary instead.

To create canaries, you must have the CloudWatchSyntheticsFullAccess policy. If you are creating a new IAM role for the canary, you also need the iam:CreateRole, iam:CreatePolicy and iam:AttachRolePolicy permissions. For more information, see Necessary Roles and Permissions.

Do not include secrets or proprietary information in your canary names. The canary name makes up part of the Amazon Resource Name (ARN) for the canary, and the ARN is included in outbound calls over the internet. For more information, see Security Considerations for Synthetics Canaries.

" + }, + "DeleteCanary":{ + "name":"DeleteCanary", + "http":{ + "method":"DELETE", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"DeleteCanaryRequest"}, + "output":{"shape":"DeleteCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Permanently deletes the specified canary.

When you delete a canary, resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to use again, you should also delete the following:

  • The Lambda functions and layers used by this canary. These have the prefix cwsyn-MyCanaryName .

  • The CloudWatch alarms created for this canary. These alarms have a name of Synthetics-SharpDrop-Alarm-MyCanaryName .

  • Amazon S3 objects and buckets, such as the canary's artifact location.

  • IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-MyCanaryName .

  • CloudWatch Logs log groups created for the canary. These log groups have the name /aws/lambda/cwsyn-MyCanaryName .

Before you delete a canary, you might want to use GetCanary to display the information about this canary. Make note of the information returned by this operation so that you can delete these resources after you delete the canary.

" + }, + "DescribeCanaries":{ + "name":"DescribeCanaries", + "http":{ + "method":"POST", + "requestUri":"/canaries" + }, + "input":{"shape":"DescribeCanariesRequest"}, + "output":{"shape":"DescribeCanariesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

This operation returns a list of the canaries in your account, along with full details about each canary.

This operation does not have resource-level authorization, so if a user is able to use DescribeCanaries, the user can see all of the canaries in the account. A deny policy can only be used to restrict access to all canaries. It cannot be used on specific resources.

" + }, + "DescribeCanariesLastRun":{ + "name":"DescribeCanariesLastRun", + "http":{ + "method":"POST", + "requestUri":"/canaries/last-run" + }, + "input":{"shape":"DescribeCanariesLastRunRequest"}, + "output":{"shape":"DescribeCanariesLastRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Use this operation to see information from the most recent run of each canary that you have created.

" + }, + "DescribeRuntimeVersions":{ + "name":"DescribeRuntimeVersions", + "http":{ + "method":"POST", + "requestUri":"/runtime-versions" + }, + "input":{"shape":"DescribeRuntimeVersionsRequest"}, + "output":{"shape":"DescribeRuntimeVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of Synthetics canary runtime versions. For more information, see Canary Runtime Versions.

" + }, + "GetCanary":{ + "name":"GetCanary", + "http":{ + "method":"GET", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"GetCanaryRequest"}, + "output":{"shape":"GetCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Retrieves complete information about one canary. You must specify the name of the canary that you want. To get a list of canaries and their names, use DescribeCanaries.

" + }, + "GetCanaryRuns":{ + "name":"GetCanaryRuns", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/runs" + }, + "input":{"shape":"GetCanaryRunsRequest"}, + "output":{"shape":"GetCanaryRunsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves a list of runs for a specified canary.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Displays the tags associated with a canary.

" + }, + "StartCanary":{ + "name":"StartCanary", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/start" + }, + "input":{"shape":"StartCanaryRequest"}, + "output":{"shape":"StartCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Use this operation to run a canary that has already been created. The frequency of the canary runs is determined by the value of the canary's Schedule. To see a canary's schedule, use GetCanary.

" + }, + "StopCanary":{ + "name":"StopCanary", + "http":{ + "method":"POST", + "requestUri":"/canary/{name}/stop" + }, + "input":{"shape":"StopCanaryRequest"}, + "output":{"shape":"StopCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Stops the canary to prevent all future runs. If the canary is currently running, Synthetics stops waiting for the current run of the specified canary to complete. The run that is in progress completes on its own, publishes metrics, and uploads artifacts, but it is not recorded in Synthetics as a completed run.

You can use StartCanary to start it running again with the canary’s current schedule at any point in the future.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Assigns one or more tags (key-value pairs) to the specified canary.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with a canary that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a canary.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Removes one or more tags from the specified canary.

" + }, + "UpdateCanary":{ + "name":"UpdateCanary", + "http":{ + "method":"PATCH", + "requestUri":"/canary/{name}" + }, + "input":{"shape":"UpdateCanaryRequest"}, + "output":{"shape":"UpdateCanaryResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Use this operation to change the settings of a canary that has already been created.

You can't use this operation to update the tags of an existing canary. To change the tags of an existing canary, use TagResource.

" + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso-{0,1}[a-z]{0,1}):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "Blob":{ + "type":"blob", + "max":10000000, + "min":1 + }, + "Canaries":{ + "type":"list", + "member":{"shape":"Canary"} + }, + "CanariesLastRun":{ + "type":"list", + "member":{"shape":"CanaryLastRun"} + }, + "Canary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

The unique ID of this canary.

" + }, + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary.

" + }, + "Code":{"shape":"CanaryCodeOutput"}, + "ExecutionRoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the IAM role used to run the canary. This role must include lambda.amazonaws.com as a principal in the trust policy.

" + }, + "Schedule":{ + "shape":"CanaryScheduleOutput", + "documentation":"

A structure that contains information about how often the canary is to run, and when these runs are to stop.

" + }, + "RunConfig":{"shape":"CanaryRunConfigOutput"}, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about successful runs of this canary.

" + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about failed runs of this canary.

" + }, + "Status":{ + "shape":"CanaryStatus", + "documentation":"

A structure that contains information about the canary's status.

" + }, + "Timeline":{ + "shape":"CanaryTimeline", + "documentation":"

A structure that contains information about when the canary was created, modified, and most recently run.

" + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

The location in Amazon S3 where Synthetics stores artifacts from the runs of this canary. Artifacts include the log file, screenshots, and HAR files.

" + }, + "EngineArn":{ + "shape":"Arn", + "documentation":"

The ARN of the Lambda function that is used as your canary's engine. For more information about Lambda ARN format, see Resources and Conditions for Lambda Actions.

" + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid value is syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + }, + "VpcConfig":{"shape":"VpcConfigOutput"}, + "Tags":{ + "shape":"TagMap", + "documentation":"

The list of key-value pairs that are associated with the canary.

" + } + }, + "documentation":"

This structure contains all information about one canary in your account.

" + }, + "CanaryCodeInput":{ + "type":"structure", + "required":["Handler"], + "members":{ + "S3Bucket":{ + "shape":"String", + "documentation":"

If your canary script is located in S3, specify the full bucket name here. The bucket must already exist. Specify the full bucket name, including s3:// as the start of the bucket name.

" + }, + "S3Key":{ + "shape":"String", + "documentation":"

The S3 key of your script. For more information, see Working with Amazon S3 Objects.

" + }, + "S3Version":{ + "shape":"String", + "documentation":"

The S3 version ID of your script.

" + }, + "ZipFile":{ + "shape":"Blob", + "documentation":"

If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the .zip file that contains the script. It can be up to 5 MB.

" + }, + "Handler":{ + "shape":"String", + "documentation":"

The entry point to use for the source code when running the canary. This value must end with the string .handler.

" + } + }, + "documentation":"

Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of Zipfile.

" + }, + "CanaryCodeOutput":{ + "type":"structure", + "members":{ + "SourceLocationArn":{ + "shape":"String", + "documentation":"

The ARN of the Lambda layer where Synthetics stores the canary script code.

" + }, + "Handler":{ + "shape":"String", + "documentation":"

The entry point to use for the source code when running the canary.

" + } + }, + "documentation":"

This structure contains information about the canary's Lambda handler and where its code is stored by CloudWatch Synthetics.

" + }, + "CanaryLastRun":{ + "type":"structure", + "members":{ + "CanaryName":{ + "shape":"CanaryName", + "documentation":"

The name of the canary.

" + }, + "LastRun":{ + "shape":"CanaryRun", + "documentation":"

The results from this canary's most recent run.

" + } + }, + "documentation":"

This structure contains information about the most recent run of a single canary.

" + }, + "CanaryName":{ + "type":"string", + "max":21, + "min":1, + "pattern":"^[0-9a-z_\\-]+$" + }, + "CanaryRun":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary.

" + }, + "Status":{ + "shape":"CanaryRunStatus", + "documentation":"

The status of this run.

" + }, + "Timeline":{ + "shape":"CanaryRunTimeline", + "documentation":"

A structure that contains the start and end times of this run.

" + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

The location where the canary stored artifacts from the run. Artifacts include the log file, screenshots, and HAR files.

" + } + }, + "documentation":"

This structure contains the details about one run of one canary.

" + }, + "CanaryRunConfigInput":{ + "type":"structure", + "required":["TimeoutInSeconds"], + "members":{ + "TimeoutInSeconds":{ + "shape":"MaxFifteenMinutesInSeconds", + "documentation":"

How long the canary is allowed to run before it must stop. If you omit this field, the frequency of the canary is used as this value, up to a maximum of 14 minutes.

" + }, + "MemoryInMB":{ + "shape":"MaxSize3008", + "documentation":"

The maximum amount of memory available to the canary while it is running, in MB. The value you specify must be a multiple of 64.

" + } + }, + "documentation":"

A structure that contains input information for a canary run.

" + }, + "CanaryRunConfigOutput":{ + "type":"structure", + "members":{ + "TimeoutInSeconds":{ + "shape":"MaxFifteenMinutesInSeconds", + "documentation":"

How long the canary is allowed to run before it must stop.

" + }, + "MemoryInMB":{ + "shape":"MaxSize3008", + "documentation":"

The maximum amount of memory available to the canary while it is running, in MB. The value you specify must be a multiple of 64.

" + } + }, + "documentation":"

A structure that contains information for a canary run.

" + }, + "CanaryRunState":{ + "type":"string", + "enum":[ + "RUNNING", + "PASSED", + "FAILED" + ] + }, + "CanaryRunStateReasonCode":{ + "type":"string", + "enum":[ + "CANARY_FAILURE", + "EXECUTION_FAILURE" + ] + }, + "CanaryRunStatus":{ + "type":"structure", + "members":{ + "State":{ + "shape":"CanaryRunState", + "documentation":"

The current state of the run.

" + }, + "StateReason":{ + "shape":"String", + "documentation":"

If this run of the canary failed, this field contains the reason for the error.

" + }, + "StateReasonCode":{ + "shape":"CanaryRunStateReasonCode", + "documentation":"

If this value is CANARY_FAILURE, an exception occurred in the canary code. If this value is EXECUTION_FAILURE, an exception occurred in CloudWatch Synthetics.

" + } + }, + "documentation":"

This structure contains the status information about a canary run.

" + }, + "CanaryRunTimeline":{ + "type":"structure", + "members":{ + "Started":{ + "shape":"Timestamp", + "documentation":"

The start time of the run.

" + }, + "Completed":{ + "shape":"Timestamp", + "documentation":"

The end time of the run.

" + } + }, + "documentation":"

This structure contains the start and end times of a single canary run.

" + }, + "CanaryRuns":{ + "type":"list", + "member":{"shape":"CanaryRun"} + }, + "CanaryScheduleInput":{ + "type":"structure", + "required":["Expression"], + "members":{ + "Expression":{ + "shape":"String", + "documentation":"

A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour. You can specify a frequency between rate(1 minute) and rate(1 hour).

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

" + }, + "DurationInSeconds":{ + "shape":"MaxOneYearInSeconds", + "documentation":"

How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. If you specify 0, the canary continues making runs until you stop it. If you omit this field, the default of 0 is used.

" + } + }, + "documentation":"

This structure specifies how often a canary is to make runs and the date and time when it should stop making runs.

" + }, + "CanaryScheduleOutput":{ + "type":"structure", + "members":{ + "Expression":{ + "shape":"String", + "documentation":"

A rate expression that defines how often the canary is to run. The syntax is rate(number unit). unit can be minute, minutes, or hour.

For example, rate(1 minute) runs the canary once a minute, rate(10 minutes) runs it once every 10 minutes, and rate(1 hour) runs it once every hour.

Specifying rate(0 minute) or rate(0 hour) is a special value that causes the canary to run only once when it is started.

" + }, + "DurationInSeconds":{ + "shape":"MaxOneYearInSeconds", + "documentation":"

How long, in seconds, for the canary to continue making regular runs after it was created. The runs are performed according to the schedule in the Expression value.

" + } + }, + "documentation":"

How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value.

" + }, + "CanaryState":{ + "type":"string", + "enum":[ + "CREATING", + "READY", + "STARTING", + "RUNNING", + "UPDATING", + "STOPPING", + "STOPPED", + "ERROR", + "DELETING" + ] + }, + "CanaryStateReasonCode":{ + "type":"string", + "enum":["INVALID_PERMISSIONS"] + }, + "CanaryStatus":{ + "type":"structure", + "members":{ + "State":{ + "shape":"CanaryState", + "documentation":"

The current state of the canary.

" + }, + "StateReason":{ + "shape":"String", + "documentation":"

If the canary has insufficient permissions to run, this field provides more details.

" + }, + "StateReasonCode":{ + "shape":"CanaryStateReasonCode", + "documentation":"

If the canary cannot run or has failed, this field displays the reason.

" + } + }, + "documentation":"

A structure that contains the current state of the canary.

" + }, + "CanaryTimeline":{ + "type":"structure", + "members":{ + "Created":{ + "shape":"Timestamp", + "documentation":"

The date and time the canary was created.

" + }, + "LastModified":{ + "shape":"Timestamp", + "documentation":"

The date and time the canary was most recently modified.

" + }, + "LastStarted":{ + "shape":"Timestamp", + "documentation":"

The date and time that the canary's most recent run started.

" + }, + "LastStopped":{ + "shape":"Timestamp", + "documentation":"

The date and time that the canary's most recent run ended.

" + } + }, + "documentation":"

This structure contains information about when the canary was created and modified.

" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A conflicting operation is already in progress.

", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateCanaryRequest":{ + "type":"structure", + "required":[ + "Name", + "Code", + "ArtifactS3Location", + "ExecutionRoleArn", + "Schedule", + "RuntimeVersion" + ], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name for this canary. Be sure to give it a descriptive name that distinguishes it from other canaries in your account.

Do not include secrets or proprietary information in your canary names. The canary name makes up part of the canary ARN, and the ARN is included in outbound calls over the internet. For more information, see Security Considerations for Synthetics Canaries.

" + }, + "Code":{ + "shape":"CanaryCodeInput", + "documentation":"

A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

" + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files.

" + }, + "ExecutionRoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the IAM role to be used to run the canary. This role must already exist, and must include lambda.amazonaws.com as a principal in the trust policy. The role must also have the following permissions:

  • s3:PutObject

  • s3:GetBucketLocation

  • s3:ListAllMyBuckets

  • cloudwatch:PutMetricData

  • logs:CreateLogGroup

  • logs:CreateLogStream

  • logs:PutLogEvents

" + }, + "Schedule":{ + "shape":"CanaryScheduleInput", + "documentation":"

A structure that contains information about how often the canary is to run and when these test runs are to stop.

" + }, + "RunConfig":{ + "shape":"CanaryRunConfigInput", + "documentation":"

A structure that contains the configuration for individual canary runs, such as timeout value.

" + }, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

" + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

" + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid value is syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + }, + "VpcConfig":{ + "shape":"VpcConfigInput", + "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

A list of key-value pairs to associate with the canary. You can associate as many as 50 tags with a canary.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

" + } + } + }, + "CreateCanaryResponse":{ + "type":"structure", + "members":{ + "Canary":{ + "shape":"Canary", + "documentation":"

The full details about the canary you have created.

" + } + } + }, + "DeleteCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want to delete. To find the names of your canaries, use DescribeCanaries.

", + "location":"uri", + "locationName":"name" + } + } + }, + "DeleteCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeCanariesLastRunRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanariesLastRun operation to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Specify this parameter to limit how many runs are returned each time you use the DescribeCanariesLastRun operation. If you omit this parameter, the default of 100 is used.

" + } + } + }, + "DescribeCanariesLastRunResponse":{ + "type":"structure", + "members":{ + "CanariesLastRun":{ + "shape":"CanariesLastRun", + "documentation":"

An array that contains the information from the most recent run of each canary.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanariesLastRun operation to retrieve the next set of results.

" + } + } + }, + "DescribeCanariesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent operation to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxCanaryResults", + "documentation":"

Specify this parameter to limit how many canaries are returned each time you use the DescribeCanaries operation. If you omit this parameter, the default of 100 is used.

" + } + } + }, + "DescribeCanariesResponse":{ + "type":"structure", + "members":{ + "Canaries":{ + "shape":"Canaries", + "documentation":"

Returns an array. Each item in the array contains the full information about one canary.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent DescribeCanaries operation to retrieve the next set of results.

" + } + } + }, + "DescribeRuntimeVersionsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent DescribeRuntimeVersions operation to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Specify this parameter to limit how many runs are returned each time you use the DescribeRuntimeVersions operation. If you omit this parameter, the default of 100 is used.

" + } + } + }, + "DescribeRuntimeVersionsResponse":{ + "type":"structure", + "members":{ + "RuntimeVersions":{ + "shape":"RuntimeVersionList", + "documentation":"

An array of objects that display the details about each Synthetics canary runtime version.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent DescribeRuntimeVersions operation to retrieve the next set of results.

" + } + } + }, + "ErrorMessage":{"type":"string"}, + "GetCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want details for.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetCanaryResponse":{ + "type":"structure", + "members":{ + "Canary":{ + "shape":"Canary", + "documentation":"

A structure that contains the full information about the canary.

" + } + } + }, + "GetCanaryRunsRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want to see runs for.

", + "location":"uri", + "locationName":"name" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxSize100", + "documentation":"

Specify this parameter to limit how many runs are returned each time you use the GetCanaryRuns operation. If you omit this parameter, the default of 100 is used.

" + } + } + }, + "GetCanaryRunsResponse":{ + "type":"structure", + "members":{ + "CanaryRuns":{ + "shape":"CanaryRuns", + "documentation":"

An array of structures. Each structure contains the details of one of the retrieved canary runs.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An unknown internal error occurred.

", + "error":{"httpStatusCode":500}, + "exception":true + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the canary that you want to view tags for.

The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The list of tag keys and values associated with the canary that you specified.

" + } + } + }, + "MaxCanaryResults":{ + "type":"integer", + "max":20, + "min":1 + }, + "MaxFifteenMinutesInSeconds":{ + "type":"integer", + "max":900, + "min":60 + }, + "MaxOneYearInSeconds":{ + "type":"long", + "max":31622400, + "min":0 + }, + "MaxSize100":{ + "type":"integer", + "max":100, + "min":1 + }, + "MaxSize1024":{ + "type":"integer", + "max":1024, + "min":1 + }, + "MaxSize3008":{ + "type":"integer", + "max":3008, + "min":960 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

One of the specified resources was not found.

", + "error":{"httpStatusCode":404}, + "exception":true + }, + "RuntimeVersion":{ + "type":"structure", + "members":{ + "VersionName":{ + "shape":"String", + "documentation":"

The name of the runtime version. Currently, the only valid value is syn-1.0.

Specifies the runtime version to use for the canary. Currently, the only valid value is syn-1.0.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description of the runtime version, created by Amazon.

" + }, + "ReleaseDate":{ + "shape":"Timestamp", + "documentation":"

The date that the runtime version was released.

" + }, + "DeprecationDate":{ + "shape":"Timestamp", + "documentation":"

If this runtime version is deprecated, this value is the date of deprecation.

" + } + }, + "documentation":"

This structure contains information about one canary runtime version. For more information about runtime versions, see Canary Runtime Versions.

" + }, + "RuntimeVersionList":{ + "type":"list", + "member":{"shape":"RuntimeVersion"} + }, + "SecurityGroupId":{"type":"string"}, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":5, + "min":0 + }, + "StartCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want to run. To find canary names, use DescribeCanaries.

", + "location":"uri", + "locationName":"name" + } + } + }, + "StartCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "StopCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want to stop. To find the names of your canaries, use DescribeCanaries.

", + "location":"uri", + "locationName":"name" + } + } + }, + "StopCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "String":{ + "type":"string", + "max":1024, + "min":1 + }, + "SubnetId":{"type":"string"}, + "SubnetIds":{ + "type":"list", + "member":{"shape":"SubnetId"}, + "max":16, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the canary that you're adding tags to.

The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The list of key-value pairs to associate with the canary.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, + "Timestamp":{"type":"timestamp"}, + "Token":{ + "type":"string", + "pattern":"^[a-zA-Z0-9=/+_.-]{4,252}$" + }, + "UUID":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the canary that you're removing tags from.

The ARN format of a canary is arn:aws:synthetics:Region:account-id:canary:canary-name .

", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateCanaryRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"CanaryName", + "documentation":"

The name of the canary that you want to update. To find the names of your canaries, use DescribeCanaries.

You cannot change the name of a canary that has already been created.

", + "location":"uri", + "locationName":"name" + }, + "Code":{ + "shape":"CanaryCodeInput", + "documentation":"

A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

" + }, + "ExecutionRoleArn":{ + "shape":"Arn", + "documentation":"

The ARN of the IAM role to be used to run the canary. This role must already exist, and must include lambda.amazonaws.com as a principal in the trust policy. The role must also have the following permissions:

  • s3:PutObject

  • s3:GetBucketLocation

  • s3:ListAllMyBuckets

  • cloudwatch:PutMetricData

  • logs:CreateLogGroup

  • logs:CreateLogStream

  • logs:PutLogEvents

" + }, + "RuntimeVersion":{ + "shape":"String", + "documentation":"

Specifies the runtime version to use for the canary. Currently, the only valid value is syn-1.0. For more information about runtime versions, see Canary Runtime Versions.

" + }, + "Schedule":{ + "shape":"CanaryScheduleInput", + "documentation":"

A structure that contains information about how often the canary is to run, and when these runs are to stop.

" + }, + "RunConfig":{ + "shape":"CanaryRunConfigInput", + "documentation":"

A structure that contains the timeout value that is used for each individual run of the canary.

" + }, + "SuccessRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about successful runs of this canary.

" + }, + "FailureRetentionPeriodInDays":{ + "shape":"MaxSize1024", + "documentation":"

The number of days to retain data about failed runs of this canary.

" + }, + "VpcConfig":{ + "shape":"VpcConfigInput", + "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnet and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" + } + } + }, + "UpdateCanaryResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A parameter could not be validated.

", + "error":{"httpStatusCode":400}, + "exception":true + }, + "VpcConfigInput":{ + "type":"structure", + "members":{ + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The IDs of the subnets where this canary is to run.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The IDs of the security groups for this canary.

" + } + }, + "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" + }, + "VpcConfigOutput":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

The ID of the VPC where this canary is to run.

" + }, + "SubnetIds":{ + "shape":"SubnetIds", + "documentation":"

The IDs of the subnets where this canary is to run.

" + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

The IDs of the security groups for this canary.

" + } + }, + "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" + }, + "VpcId":{"type":"string"} + }, + "documentation":"Amazon CloudWatch Synthetics

You can use Amazon CloudWatch Synthetics to continually monitor your services. You can create and manage canaries, which are modular, lightweight scripts that monitor your endpoints and APIs from the outside-in. You can set up your canaries to run 24 hours a day, once per minute. The canaries help you check the availability and latency of your web services and troubleshoot anomalies by investigating load time data, screenshots of the UI, logs, and metrics. The canaries seamlessly integrate with CloudWatch ServiceLens to help you trace the causes of impacted nodes in your applications. For more information, see Using ServiceLens to Monitor the Health of Your Applications in the Amazon CloudWatch User Guide.

Before you create and manage canaries, be aware of the security considerations. For more information, see Security Considerations for Synthetics Canaries.

" +} diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 6b9ff16c485b..8ed84ffd1592 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index e00ecbf15e6b..f8c267710f29 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribe/src/main/resources/codegen-resources/paginators-1.json b/services/transcribe/src/main/resources/codegen-resources/paginators-1.json index aded8e376efe..e6d8ba9920e9 100644 --- a/services/transcribe/src/main/resources/codegen-resources/paginators-1.json +++ b/services/transcribe/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,15 @@ { "pagination": { + "ListMedicalTranscriptionJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListMedicalVocabularies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListTranscriptionJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/transcribe/src/main/resources/codegen-resources/service-2.json b/services/transcribe/src/main/resources/codegen-resources/service-2.json index fc8743b79d19..0273902dbafb 100644 --- a/services/transcribe/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribe/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,22 @@ "uid":"transcribe-2017-10-26" }, "operations":{ + "CreateMedicalVocabulary":{ + "name":"CreateMedicalVocabulary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMedicalVocabularyRequest"}, + 
"output":{"shape":"CreateMedicalVocabularyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a new custom vocabulary that you can use to change how Amazon Transcribe Medical transcribes your audio file.

" + }, "CreateVocabulary":{ "name":"CreateVocabulary", "http":{ @@ -45,6 +61,35 @@ ], "documentation":"

Creates a new vocabulary filter that you can use to filter words, such as profane words, from the output of a transcription job.

" }, + "DeleteMedicalTranscriptionJob":{ + "name":"DeleteMedicalTranscriptionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMedicalTranscriptionJobRequest"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a transcription job generated by Amazon Transcribe Medical and any related information.

" + }, + "DeleteMedicalVocabulary":{ + "name":"DeleteMedicalVocabulary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMedicalVocabularyRequest"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Deletes a vocabulary from Amazon Transcribe Medical.

" + }, "DeleteTranscriptionJob":{ "name":"DeleteTranscriptionJob", "http":{ @@ -89,6 +134,38 @@ ], "documentation":"

Removes a vocabulary filter.

" }, + "GetMedicalTranscriptionJob":{ + "name":"GetMedicalTranscriptionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMedicalTranscriptionJobRequest"}, + "output":{"shape":"GetMedicalTranscriptionJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Returns information about a transcription job from Amazon Transcribe Medical. To see the status of the job, check the TranscriptionJobStatus field. If the status is COMPLETED, the job is finished. You find the results of the completed job in the TranscriptFileUri field.

" + }, + "GetMedicalVocabulary":{ + "name":"GetMedicalVocabulary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMedicalVocabularyRequest"}, + "output":{"shape":"GetMedicalVocabularyResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieve information about a medical vocabulary.

" + }, "GetTranscriptionJob":{ "name":"GetTranscriptionJob", "http":{ @@ -137,6 +214,36 @@ ], "documentation":"

Returns information about a vocabulary filter.

" }, + "ListMedicalTranscriptionJobs":{ + "name":"ListMedicalTranscriptionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMedicalTranscriptionJobsRequest"}, + "output":{"shape":"ListMedicalTranscriptionJobsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists medical transcription jobs with a specified status or substring that matches their names.

" + }, + "ListMedicalVocabularies":{ + "name":"ListMedicalVocabularies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMedicalVocabulariesRequest"}, + "output":{"shape":"ListMedicalVocabulariesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Returns a list of vocabularies that match the specified criteria. You get the entire list of vocabularies if you don't enter a value in any of the request parameters.

" + }, "ListTranscriptionJobs":{ "name":"ListTranscriptionJobs", "http":{ @@ -182,6 +289,22 @@ ], "documentation":"

Gets information about vocabulary filters.

" }, + "StartMedicalTranscriptionJob":{ + "name":"StartMedicalTranscriptionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMedicalTranscriptionJobRequest"}, + "output":{"shape":"StartMedicalTranscriptionJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Start a batch job to transcribe medical speech to text.

" + }, "StartTranscriptionJob":{ "name":"StartTranscriptionJob", "http":{ @@ -198,6 +321,23 @@ ], "documentation":"

Starts an asynchronous job to transcribe speech to text.

" }, + "UpdateMedicalVocabulary":{ + "name":"UpdateMedicalVocabulary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMedicalVocabularyRequest"}, + "output":{"shape":"UpdateMedicalVocabularyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates an existing vocabulary with new values in a different text file. The UpdateMedicalVocabulary operation overwrites all of the existing information with the values that you provide in the request.

" + }, "UpdateVocabulary":{ "name":"UpdateVocabulary", "http":{ @@ -247,7 +387,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

When you are using the CreateVocabulary operation, the JobName field is a duplicate of a previously entered job name. Resend your request with a different name.

When you are using the UpdateVocabulary operation, there are two jobs running at the same time. Resend the second request later.

", + "documentation":"

The resource name already exists.

", "exception":true }, "ContentRedaction":{ @@ -263,10 +403,57 @@ }, "RedactionOutput":{ "shape":"RedactionOutput", - "documentation":"

Request parameter where you choose whether to output only the redacted transcript or generate an additional unredacted transcript.

When you choose redacted Amazon Transcribe outputs a JSON file with only the redacted transcript and related information.

When you choose redacted_and_unredacted Amazon Transcribe outputs a JSON file with the unredacted transcript and related information in addition to the JSON file with the redacted transcript.

" + "documentation":"

The output transcript file stored in either the default S3 bucket or in a bucket you specify.

When you choose redacted Amazon Transcribe outputs only the redacted transcript.

When you choose redacted_and_unredacted Amazon Transcribe outputs both the redacted and unredacted transcripts.

" } }, - "documentation":"

Settings for content redaction within a transcription job.

You can redact transcripts in US English (en-us). For more information see: Automatic Content Redaction

" + "documentation":"

Settings for content redaction within a transcription job.

" + }, + "CreateMedicalVocabularyRequest":{ + "type":"structure", + "required":[ + "VocabularyName", + "LanguageCode", + "VocabularyFileUri" + ], + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the custom vocabulary. This case-sensitive name must be unique within an AWS account. If you try to create a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code used for the entries within your custom vocabulary. The language code of your custom vocabulary must match the language code of your transcription job. US English (en-US) is the only language code available for Amazon Transcribe Medical.

" + }, + "VocabularyFileUri":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location of the text file you use to define your custom vocabulary. The URI must be in the same AWS region as the API endpoint you're calling. Enter information about your VocabularyFileUri in the following format:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

This is an example of a vocabulary file uri location in Amazon S3:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Medical Custom Vocabularies.

" + } + } + }, + "CreateMedicalVocabularyResponse":{ + "type":"structure", + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary. The name must be unique within an AWS account. It is also case-sensitive.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code you chose to describe the entries in your custom vocabulary. US English (en-US) is the only valid language code for Amazon Transcribe Medical.

" + }, + "VocabularyState":{ + "shape":"VocabularyState", + "documentation":"

The processing state of your custom vocabulary in Amazon Transcribe Medical. If the state is READY you can use the vocabulary in a StartMedicalTranscriptionJob request.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time you created the vocabulary.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the VocabularyState field is FAILED, this field contains information about why the job failed.

" + } + } }, "CreateVocabularyFilterRequest":{ "type":"structure", @@ -277,7 +464,7 @@ "members":{ "VocabularyFilterName":{ "shape":"VocabularyFilterName", - "documentation":"

The vocabulary filter name. The name must be unique within the account that contains it.

" + "documentation":"

The vocabulary filter name. The name must be unique within the account that contains it. If you try to create a vocabulary filter with the same name as a previous vocabulary filter you will receive a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -319,7 +506,7 @@ "members":{ "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the vocabulary. The name must be unique within an AWS account. The name is case-sensitive.

" + "documentation":"

The name of the vocabulary. The name must be unique within an AWS account. The name is case-sensitive. If you try to create a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -331,7 +518,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/examplebucket/vocab.txt

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" + "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" } } }, @@ -365,6 +552,26 @@ "pattern":"^arn:aws:iam::[0-9]{0,63}:role/[A-Za-z0-9:_/+=,@.-]{0,1023}$" }, "DateTime":{"type":"timestamp"}, + "DeleteMedicalTranscriptionJobRequest":{ + "type":"structure", + "required":["MedicalTranscriptionJobName"], + "members":{ + "MedicalTranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name you provide to the DeleteMedicalTranscriptionJob object to delete a transcription job.

" + } + } + }, + "DeleteMedicalVocabularyRequest":{ + "type":"structure", + "required":["VocabularyName"], + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary you are choosing to delete.

" + } + } + }, "DeleteTranscriptionJobRequest":{ "type":"structure", "required":["TranscriptionJobName"], @@ -396,6 +603,64 @@ } }, "FailureReason":{"type":"string"}, + "GetMedicalTranscriptionJobRequest":{ + "type":"structure", + "required":["MedicalTranscriptionJobName"], + "members":{ + "MedicalTranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name of the medical transcription job.

" + } + } + }, + "GetMedicalTranscriptionJobResponse":{ + "type":"structure", + "members":{ + "MedicalTranscriptionJob":{ + "shape":"MedicalTranscriptionJob", + "documentation":"

An object that contains the results of the medical transcription job.

" + } + } + }, + "GetMedicalVocabularyRequest":{ + "type":"structure", + "required":["VocabularyName"], + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary you are trying to get information about. The value you enter for this request is case-sensitive.

" + } + } + }, + "GetMedicalVocabularyResponse":{ + "type":"structure", + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The valid name that Amazon Transcribe Medical returns.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The valid language code returned for your vocabulary entries.

" + }, + "VocabularyState":{ + "shape":"VocabularyState", + "documentation":"

The processing state of the vocabulary.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time the vocabulary was last modified with a text file different from what was previously used.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the VocabularyState is FAILED, this field contains information about why the job failed.

" + }, + "DownloadUri":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location where the vocabulary is stored. Use this URI to get the contents of the vocabulary. You can download your vocabulary from the URI for a limited time.

" + } + } + }, "GetTranscriptionJobRequest":{ "type":"structure", "required":["TranscriptionJobName"], @@ -499,11 +764,11 @@ "members":{ "AllowDeferredExecution":{ "shape":"Boolean", - "documentation":"

Indicates whether a job should be queued by Amazon Transcribe when the concurrent execution limit is exceeded. When the AllowDeferredExecution field is true, jobs are queued and will be executed when the number of executing jobs falls below the concurrent execution limit. If the field is false, Amazon Transcribe returns a LimitExceededException exception.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" + "documentation":"

Indicates whether a job should be queued by Amazon Transcribe when the concurrent execution limit is exceeded. When the AllowDeferredExecution field is true, jobs are queued and executed when the number of executing jobs falls below the concurrent execution limit. If the field is false, Amazon Transcribe returns a LimitExceededException exception.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" }, "DataAccessRoleArn":{ "shape":"DataAccessRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that contains the input files. Amazon Transcribe will assume this role to read queued media files. If you have specified an output S3 bucket for the transcription results, this role should have access to the output bucket as well.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" + "documentation":"

The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that contains the input files. Amazon Transcribe assumes this role to read queued media files. If you have specified an output S3 bucket for the transcription results, this role should have access to the output bucket as well.

If you specify the AllowDeferredExecution field, you must specify the DataAccessRoleArn field.

" } }, "documentation":"

Provides information about when a transcription job should be executed.

" @@ -558,6 +823,82 @@ "documentation":"

Either you have sent too many requests or your input file is too long. Wait before you resend your request, or use a smaller file and resend the request.

", "exception":true }, + "ListMedicalTranscriptionJobsRequest":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"TranscriptionJobStatus", + "documentation":"

When specified, returns only medical transcription jobs with the specified status. Jobs are ordered by creation date, with the newest jobs returned first. If you don't specify a status, Amazon Transcribe Medical returns all transcription jobs ordered by creation date.

" + }, + "JobNameContains":{ + "shape":"TranscriptionJobName", + "documentation":"

When specified, the jobs returned in the list are limited to jobs whose name contains the specified string.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If you receive a truncated result in the previous request of ListMedicalTranscriptionJobs, include NextToken to fetch the next set of jobs.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of medical transcription jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + } + } + }, + "ListMedicalTranscriptionJobsResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The requested status of the medical transcription jobs returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The ListMedicalTranscriptionJobs operation returns a page of jobs at a time. The maximum size of the page is set by the MaxResults parameter. If the number of jobs exceeds what can fit on a page, Amazon Transcribe Medical returns the NextToken. Include the token in the next request to the ListMedicalTranscriptionJobs operation to return the next page of jobs.

" + }, + "MedicalTranscriptionJobSummaries":{ + "shape":"MedicalTranscriptionJobSummaries", + "documentation":"

A list of objects containing summary information for a transcription job.

" + } + } + }, + "ListMedicalVocabulariesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of your previous request to ListMedicalVocabularies was truncated, include the NextToken to fetch the next set of jobs.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of vocabularies to return in the response.

" + }, + "StateEquals":{ + "shape":"VocabularyState", + "documentation":"

When specified, only returns vocabularies with the VocabularyState equal to the specified vocabulary state.

" + }, + "NameContains":{ + "shape":"VocabularyName", + "documentation":"

Returns vocabularies in the list whose name contains the specified string. The search is case-insensitive; ListMedicalVocabularies returns both \"vocabularyname\" and \"VocabularyName\" in the response list.

" + } + } + }, + "ListMedicalVocabulariesResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"VocabularyState", + "documentation":"

The requested vocabulary state.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The ListMedicalVocabularies operation returns a page of vocabularies at a time. The maximum size of the page is set by the MaxResults parameter. If there are more vocabularies in the list than the page size, Amazon Transcribe Medical returns the NextToken. Include the token in the next request to the ListMedicalVocabularies operation to return the next page of vocabularies.

" + }, + "Vocabularies":{ + "shape":"Vocabularies", + "documentation":"

A list of objects that describe the vocabularies that match the search criteria in the request.

" + } + } + }, "ListTranscriptionJobsRequest":{ "type":"structure", "members":{ @@ -613,7 +954,7 @@ }, "NameContains":{ "shape":"VocabularyName", - "documentation":"

When specified, the vocabularies returned in the list are limited to vocabularies whose name contains the specified string. The search is case-insensitive, ListVocabularies will return both \"vocabularyname\" and \"VocabularyName\" in the response list.

" + "documentation":"

When specified, the vocabularies returned in the list are limited to vocabularies whose name contains the specified string. The search is case-insensitive; ListVocabularies returns both \"vocabularyname\" and \"VocabularyName\" in the response list.

" } } }, @@ -621,7 +962,7 @@ "type":"structure", "members":{ "Status":{ - "shape":"TranscriptionJobStatus", + "shape":"VocabularyState", "documentation":"

The requested vocabulary state.

" }, "NextToken":{ @@ -660,7 +1001,7 @@ }, "VocabularyFilters":{ "shape":"VocabularyFilters", - "documentation":"

The list of vocabulary filters. It will contain at most MaxResults number of filters. If there are more filters, call the ListVocabularyFilters operation again with the NextToken parameter in the request set to the value of the NextToken field in the response.

" + "documentation":"

The list of vocabulary filters. It contains at most MaxResults number of filters. If there are more filters, call the ListVocabularyFilters operation again with the NextToken parameter in the request set to the value of the NextToken field in the response.

" } } }, @@ -684,7 +1025,7 @@ "members":{ "MediaFileUri":{ "shape":"Uri", - "documentation":"

The S3 object location of the input media file. The URI must be in the same region as the API endpoint that you are calling. The general form is:

s3://<bucket-name>/<keyprefix>/<objectkey>

For example:

s3://examplebucket/example.mp4

s3://examplebucket/mediadocs/example.mp4

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

" + "documentation":"

The S3 object location of the input media file. The URI must be in the same region as the API endpoint that you are calling. The general form is:

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

" } }, "documentation":"

Describes the input media file in a transcription request.

" @@ -703,6 +1044,155 @@ "max":48000, "min":8000 }, + "MedicalTranscript":{ + "type":"structure", + "members":{ + "TranscriptFileUri":{ + "shape":"Uri", + "documentation":"

The S3 object location of the medical transcript.

Use this URI to access the medical transcript. This URI points to the S3 bucket you created to store the medical transcript.

" + } + }, + "documentation":"

Identifies the location of a medical transcript.

" + }, + "MedicalTranscriptionJob":{ + "type":"structure", + "members":{ + "MedicalTranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name for a given medical transcription job.

" + }, + "TranscriptionJobStatus":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The completion status of a medical transcription job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the language spoken in the source audio file. US English (en-US) is the only supported language for medical transcriptions. Any other value you enter for language code results in a BadRequestException error.

" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate, in Hertz, of the source audio containing medical information.

If you don't specify the sample rate, Amazon Transcribe Medical determines it for you. If you choose to specify the sample rate, it must match the rate detected by Amazon Transcribe Medical. In most cases, you should leave the MediaSampleHertz blank and let Amazon Transcribe Medical determine the sample rate.

" + }, + "MediaFormat":{ + "shape":"MediaFormat", + "documentation":"

The format of the input media file.

" + }, + "Media":{"shape":"Media"}, + "Transcript":{ + "shape":"MedicalTranscript", + "documentation":"

An object that contains the MedicalTranscript. The MedicalTranscript contains the TranscriptFileUri.

" + }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job started processing.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job was created.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job was completed.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

The FailureReason field contains one of the following values:

  • Unsupported media format- The media format specified in the MediaFormat field of the request isn't valid. See the description of the MediaFormat field for a list of valid values.

  • The media format provided does not match the detected media format- The media format of the audio file doesn't match the format specified in the MediaFormat field in the request. Check the media format of your media file and make sure the two values match.

  • Invalid sample rate for audio file- The sample rate specified in the MediaSampleRateHertz of the request isn't valid. The sample rate must be between 8000 and 48000 Hertz.

  • The sample rate provided does not match the detected sample rate- The sample rate in the audio file doesn't match the sample rate specified in the MediaSampleRateHertz field in the request. Check the sample rate of your media file and make sure that the two values match.

  • Invalid file size: file size too large- The size of your audio file is larger than what Amazon Transcribe Medical can process. For more information, see Guidelines and Quotas in the Amazon Transcribe Medical Guide

  • Invalid number of channels: number of channels too large- Your audio contains more channels than Amazon Transcribe Medical is configured to process. To request additional channels, see Amazon Transcribe Medical Endpoints and Quotas in the Amazon Web Services General Reference

" + }, + "Settings":{ + "shape":"MedicalTranscriptionSetting", + "documentation":"

Object that contains the settings used in the medical transcription job.

" + }, + "Specialty":{ + "shape":"Specialty", + "documentation":"

The medical specialty of any clinicians providing a dictation or having a conversation. PRIMARYCARE is the only available setting for this object. This specialty enables you to generate transcriptions for the following medical fields:

  • Family Medicine

" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of speech in the transcription job. CONVERSATION is generally used for patient-physician dialogues. DICTATION is the setting for physicians speaking their notes after seeing a patient. For more information, see how-it-works-med

" + } + }, + "documentation":"

The data structure that contains the information for a medical transcription job.

" + }, + "MedicalTranscriptionJobSummaries":{ + "type":"list", + "member":{"shape":"MedicalTranscriptionJobSummary"} + }, + "MedicalTranscriptionJobSummary":{ + "type":"structure", + "members":{ + "MedicalTranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name of a medical transcription job.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the medical transcription job was created.

" + }, + "StartTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job began processing.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

A timestamp that shows when the job was completed.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language of the transcript in the source audio file.

" + }, + "TranscriptionJobStatus":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The status of the medical transcription job.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the TranscriptionJobStatus field is FAILED, a description of the error.

" + }, + "OutputLocationType":{ + "shape":"OutputLocationType", + "documentation":"

Indicates the location of the transcription job's output.

The CUSTOMER_BUCKET is the S3 location provided in the OutputBucketName field when the medical transcription job was started.

" + }, + "Specialty":{ + "shape":"Specialty", + "documentation":"

The medical specialty of the transcription job. Primary care is the only valid value.

" + }, + "Type":{ + "shape":"Type", + "documentation":"

The speech of the clinician in the input audio.

" + } + }, + "documentation":"

Provides summary information about a transcription job.

" + }, + "MedicalTranscriptionSetting":{ + "type":"structure", + "members":{ + "ShowSpeakerLabels":{ + "shape":"Boolean", + "documentation":"

Determines whether the transcription job uses speaker recognition to identify different speakers in the input audio. Speaker recognition labels individual speakers in the audio file. If you set the ShowSpeakerLabels field to true, you must also set the maximum number of speaker labels in the MaxSpeakerLabels field.

You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

" + }, + "MaxSpeakerLabels":{ + "shape":"MaxSpeakers", + "documentation":"

The maximum number of speakers to identify in the input audio. If there are more speakers in the audio than this number, multiple speakers are identified as a single speaker. If you specify the MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.

" + }, + "ChannelIdentification":{ + "shape":"Boolean", + "documentation":"

Instructs Amazon Transcribe Medical to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

Amazon Transcribe Medical also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item. The alternative transcriptions also come with confidence scores provided by Amazon Transcribe Medical.

You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException

" + }, + "ShowAlternatives":{ + "shape":"Boolean", + "documentation":"

Determines whether alternative transcripts are generated along with the transcript that has the highest confidence. If you set ShowAlternatives field to true, you must also set the maximum number of alternatives to return in the MaxAlternatives field.

" + }, + "MaxAlternatives":{ + "shape":"MaxAlternatives", + "documentation":"

The maximum number of alternatives that you tell the service to return. If you specify the MaxAlternatives field, you must set the ShowAlternatives field to true.

" + }, + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary to use when processing a medical transcription job.

" + } + }, + "documentation":"

Optional settings for the StartMedicalTranscriptionJob operation.

" + }, "NextToken":{ "type":"string", "max":8192, @@ -762,7 +1252,7 @@ }, "MaxSpeakerLabels":{ "shape":"MaxSpeakers", - "documentation":"

The maximum number of speakers to identify in the input audio. If there are more speakers in the audio than this number, multiple speakers will be identified as a single speaker. If you specify the MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.

" + "documentation":"

The maximum number of speakers to identify in the input audio. If there are more speakers in the audio than this number, multiple speakers are identified as a single speaker. If you specify the MaxSpeakerLabels field, you must set the ShowSpeakerLabels field to true.

" }, "ChannelIdentification":{ "shape":"Boolean", @@ -787,6 +1277,69 @@ }, "documentation":"

Provides optional settings for the StartTranscriptionJob operation.

" }, + "Specialty":{ + "type":"string", + "enum":["PRIMARYCARE"] + }, + "StartMedicalTranscriptionJobRequest":{ + "type":"structure", + "required":[ + "MedicalTranscriptionJobName", + "LanguageCode", + "Media", + "OutputBucketName", + "Specialty", + "Type" + ], + "members":{ + "MedicalTranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name of the medical transcription job. You can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a medical transcription job with the same name as a previous medical transcription job you will receive a ConflictException error.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the language spoken in the input media file. US English (en-US) is the valid value for medical transcription jobs. Any other value you enter for language code results in a BadRequestException error.

" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate, in Hertz, of the audio track in the input media file.

If you do not specify the media sample rate, Amazon Transcribe Medical determines the sample rate. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical. In most cases, you should leave the MediaSampleRateHertz field blank and let Amazon Transcribe Medical determine the sample rate.

" + }, + "MediaFormat":{ + "shape":"MediaFormat", + "documentation":"

The audio format of the input media file.

" + }, + "Media":{"shape":"Media"}, + "OutputBucketName":{ + "shape":"OutputBucketName", + "documentation":"

The Amazon S3 location where the transcription is stored.

You must set OutputBucketName for Amazon Transcribe Medical to store the transcription results. Your transcript appears in the S3 location you specify. When you call the GetMedicalTranscriptionJob, the operation returns this location in the TranscriptFileUri field. The S3 bucket must have permissions that allow Amazon Transcribe Medical to put files in the bucket. For more information, see Permissions Required for IAM User Roles.

You can specify an AWS Key Management Service (KMS) key to encrypt the output of your transcription using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe Medical uses the default Amazon S3 key for server-side encryption of transcripts that are placed in your S3 bucket.

" + }, + "OutputEncryptionKMSKeyId":{ + "shape":"KMSKeyId", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartMedicalTranscriptionJob operation must have permission to use the specified KMS key.

You use either of the following to identify a KMS key in the current account:

  • KMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"

  • KMS Key Alias: \"alias/ExampleAlias\"

You can use either of the following to identify a KMS key in the current account or another account:

  • Amazon Resource Name (ARN) of a KMS key in the current account or another account: \"arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

  • ARN of a KMS Key Alias: \"arn:aws:kms:region:account ID:alias/ExampleAlias\"

If you don't specify an encryption key, the output of the medical transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" + }, + "Settings":{ + "shape":"MedicalTranscriptionSetting", + "documentation":"

Optional settings for the medical transcription job.

" + }, + "Specialty":{ + "shape":"Specialty", + "documentation":"

The medical specialty of any clinician speaking in the input media.

" + }, + "Type":{ + "shape":"Type", + "documentation":"

The type of speech in the input audio. CONVERSATION refers to conversations between two or more speakers, e.g., a conversations between doctors and patients. DICTATION refers to single-speaker dictated speech, e.g., for clinical notes.

" + } + } + }, + "StartMedicalTranscriptionJobResponse":{ + "type":"structure", + "members":{ + "MedicalTranscriptionJob":{ + "shape":"MedicalTranscriptionJob", + "documentation":"

A batch job submitted to transcribe medical speech to text.

" + } + } + }, "StartTranscriptionJobRequest":{ "type":"structure", "required":[ @@ -797,7 +1350,7 @@ "members":{ "TranscriptionJobName":{ "shape":"TranscriptionJobName", - "documentation":"

The name of the job. Note that you can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account.

" + "documentation":"

The name of the job. Note that you can't use the strings \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a transcription job with the same name as a previous transcription job you will receive a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -984,13 +1537,62 @@ }, "documentation":"

Provides a summary of information about a transcription job.

" }, + "Type":{ + "type":"string", + "enum":[ + "CONVERSATION", + "DICTATION" + ] + }, + "UpdateMedicalVocabularyRequest":{ + "type":"structure", + "required":[ + "VocabularyName", + "LanguageCode" + ], + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the vocabulary to update. The name is case-sensitive. If you try to update a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code of the entries in the updated vocabulary. US English (en-US) is the only valid language code in Amazon Transcribe Medical.

" + }, + "VocabularyFileUri":{ + "shape":"Uri", + "documentation":"

The Amazon S3 location of the text file containing the definition of the custom vocabulary. The URI must be in the same AWS region as the API endpoint you are calling. You can see the fields you need to enter for your Amazon S3 location in the example URI here:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom Vocabularies.

" + } + } + }, + "UpdateMedicalVocabularyResponse":{ + "type":"structure", + "members":{ + "VocabularyName":{ + "shape":"VocabularyName", + "documentation":"

The name of the updated vocabulary.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the text file used to update the custom vocabulary. US English (en-US) is the only language supported in Amazon Transcribe Medical.

" + }, + "LastModifiedTime":{ + "shape":"DateTime", + "documentation":"

The date and time the vocabulary was updated.

" + }, + "VocabularyState":{ + "shape":"VocabularyState", + "documentation":"

The processing state of the update to the vocabulary. When the VocabularyState field is READY the vocabulary is ready to be used in a StartMedicalTranscriptionJob request.

" + } + } + }, "UpdateVocabularyFilterRequest":{ "type":"structure", "required":["VocabularyFilterName"], "members":{ "VocabularyFilterName":{ "shape":"VocabularyFilterName", - "documentation":"

The name of the vocabulary filter to update.

" + "documentation":"

The name of the vocabulary filter to update. If you try to update a vocabulary filter with the same name as a previous vocabulary filter you will receive a ConflictException error.

" }, "Words":{ "shape":"Words", @@ -1028,7 +1630,7 @@ "members":{ "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the vocabulary to update. The name is case-sensitive.

" + "documentation":"

The name of the vocabulary to update. The name is case-sensitive. If you try to update a vocabulary with the same name as a previous vocabulary you will receive a ConflictException error.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -1040,7 +1642,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/examplebucket/vocab.txt

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" + "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For example:

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" } } }, diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index c7991b03db83..ae782b347e59 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transcribestreaming/src/it/java/software/amazon/awssdk/services/transcribestreaming/TranscribeStreamingIntegrationTest.java b/services/transcribestreaming/src/it/java/software/amazon/awssdk/services/transcribestreaming/TranscribeStreamingIntegrationTest.java index fc2c6129f3e9..54fb9a67d4af 100644 --- a/services/transcribestreaming/src/it/java/software/amazon/awssdk/services/transcribestreaming/TranscribeStreamingIntegrationTest.java +++ b/services/transcribestreaming/src/it/java/software/amazon/awssdk/services/transcribestreaming/TranscribeStreamingIntegrationTest.java @@ -16,18 +16,21 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static software.amazon.awssdk.http.Header.CONTENT_TYPE; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.InputStream; -import java.net.URISyntaxException; +import java.time.Duration; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import org.junit.BeforeClass; import org.junit.Test; +import org.mockito.ArgumentCaptor; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; @@ -36,12 +39,17 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import 
software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.transcribestreaming.model.AudioStream; import software.amazon.awssdk.services.transcribestreaming.model.LanguageCode; import software.amazon.awssdk.services.transcribestreaming.model.MediaEncoding; import software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionRequest; import software.amazon.awssdk.services.transcribestreaming.model.StartStreamTranscriptionResponseHandler; +import software.amazon.awssdk.utils.Logger; /** * An example test class to show the usage of @@ -51,31 +59,36 @@ * The audio files used in this class don't have voice, so there won't be any transcripted text would be empty */ public class TranscribeStreamingIntegrationTest { + private static final Logger log = Logger.loggerFor(TranscribeStreamingIntegrationTest.class); private static TranscribeStreamingAsyncClient client; + private static MetricPublisher mockPublisher; + @BeforeClass - public static void setup() throws URISyntaxException { + public static void setup() { + mockPublisher = mock(MetricPublisher.class); client = TranscribeStreamingAsyncClient.builder() .region(Region.US_EAST_1) - .overrideConfiguration(b -> b.addExecutionInterceptor(new VerifyHeaderInterceptor())) + .overrideConfiguration(b -> b.addExecutionInterceptor(new VerifyHeaderInterceptor()) + .addMetricPublisher(mockPublisher)) .credentialsProvider(getCredentials()) .build(); } @Test - public void testFileWith16kRate() throws ExecutionException, InterruptedException, URISyntaxException { + public void testFileWith16kRate() throws InterruptedException { CompletableFuture result = client.startStreamTranscription(getRequest(16_000), new 
AudioStreamPublisher( getInputStream("silence_16kHz_s16le.wav")), TestResponseHandlers.responseHandlerBuilder_Classic()); - // Blocking call to keep the main thread for shutting down - result.get(); + result.join(); + verifyMetrics(); } @Test - public void testFileWith8kRate() throws ExecutionException, InterruptedException, URISyntaxException { + public void testFileWith8kRate() throws ExecutionException, InterruptedException { CompletableFuture result = client.startStreamTranscription(getRequest(8_000), new AudioStreamPublisher( getInputStream("silence_8kHz_s16le.wav")), @@ -129,4 +142,33 @@ public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttr assertThat(contentTypeHeader.get(0)).isEqualTo(Mimetype.MIMETYPE_EVENT_STREAM); } } + + private void verifyMetrics() throws InterruptedException { + // wait for 100ms for metrics to be delivered to mockPublisher + Thread.sleep(100); + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + MetricCollection capturedCollection = collectionCaptor.getValue(); + assertThat(capturedCollection.name()).isEqualTo("ApiCall"); + log.info(() -> "captured collection: " + capturedCollection); + + assertThat(capturedCollection.metricValues(CoreMetric.CREDENTIALS_FETCH_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) + .isGreaterThan(Duration.ZERO); + + MetricCollection attemptCollection = capturedCollection.children().get(0); + assertThat(attemptCollection.name()).isEqualTo("ApiCallAttempt"); + assertThat(attemptCollection.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(200); + assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) + 
.isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_REQUEST_ID).get(0)).isNotEmpty(); + + assertThat(attemptCollection.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ofMillis(100)); + } + } diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json index 17c2c1a2d420..eeed732fb0c7 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,8 @@ {"shape":"BadRequestException"}, {"shape":"LimitExceededException"}, {"shape":"InternalFailureException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"} ], "documentation":"

Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application.

The following are encoded as HTTP2 headers:

  • x-amzn-transcribe-language-code

  • x-amzn-transcribe-media-encoding

  • x-amzn-transcribe-sample-rate

  • x-amzn-transcribe-session-id

" } @@ -120,6 +121,10 @@ "Content":{ "shape":"String", "documentation":"

The word or punctuation that was recognized in the input audio.

" + }, + "VocabularyFilterMatch":{ + "shape":"Boolean", + "documentation":"

Indicates whether a word in the item matches a word in the vocabulary filter you've chosen for your real-time stream. If true then a word in the item matches your vocabulary filter.

" } }, "documentation":"

A word or phrase transcribed from the input audio.

" @@ -195,6 +200,15 @@ "type":"list", "member":{"shape":"Result"} }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Service is currently unavailable. Try your request later.

", + "error":{"httpStatusCode":503}, + "exception":true + }, "SessionId":{ "type":"string", "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" @@ -241,6 +255,18 @@ "AudioStream":{ "shape":"AudioStream", "documentation":"

PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP2 data frame.

" + }, + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter you've created that is unique to your AWS account. Provide the name in this field to successfully use it in a stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-filter-name" + }, + "VocabularyFilterMethod":{ + "shape":"VocabularyFilterMethod", + "documentation":"

The manner in which you use your vocabulary filter to filter words in your transcript. Remove removes filtered words from your transcription results. Mask masks those words with a *** in your transcription results. Tag keeps the filtered words in your transcription results and tags them. The tag appears as VocabularyFilterMatch equal to True

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-filter-method" } }, "payload":"AudioStream" @@ -274,7 +300,7 @@ }, "VocabularyName":{ "shape":"VocabularyName", - "documentation":"

The name of the vocabulary used when processing the job.

", + "documentation":"

The name of the vocabulary used when processing the stream.

", "location":"header", "locationName":"x-amzn-transcribe-vocabulary-name" }, @@ -287,6 +313,18 @@ "TranscriptResultStream":{ "shape":"TranscriptResultStream", "documentation":"

Represents the stream of transcription events from Amazon Transcribe to your application.

" + }, + "VocabularyFilterName":{ + "shape":"VocabularyFilterName", + "documentation":"

The name of the vocabulary filter used in your real-time stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-filter-name" + }, + "VocabularyFilterMethod":{ + "shape":"VocabularyFilterMethod", + "documentation":"

The vocabulary filtering method used in the real-time stream.

", + "location":"header", + "locationName":"x-amzn-transcribe-vocabulary-filter-method" } }, "payload":"TranscriptResultStream" @@ -335,11 +373,29 @@ "ConflictException":{ "shape":"ConflictException", "documentation":"

A new stream started with the same session ID. The current stream has been terminated.

" + }, + "ServiceUnavailableException":{ + "shape":"ServiceUnavailableException", + "documentation":"

Service is currently unavailable. Try your request later.

" } }, "documentation":"

Represents the transcription result stream from Amazon Transcribe to your application.

", "eventstream":true }, + "VocabularyFilterMethod":{ + "type":"string", + "enum":[ + "remove", + "mask", + "tag" + ] + }, + "VocabularyFilterName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z._-]+" + }, "VocabularyName":{ "type":"string", "max":200, diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index 22e65a92765d..37e46ead38a1 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 63b3b0b26de2..90bd06f70256 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -6,7 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceAbbreviation":"AWS Transfer", - "serviceFullName":"AWS Transfer for SFTP", + "serviceFullName":"AWS Transfer Family", "serviceId":"Transfer", "signatureVersion":"v4", "signingName":"transfer", @@ -23,12 +23,14 @@ "input":{"shape":"CreateServerRequest"}, "output":{"shape":"CreateServerResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"}, - {"shape":"ResourceExistsException"} + {"shape":"ResourceExistsException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

Instantiates an autoscaling virtual server based on Secure File Transfer Protocol (SFTP) in AWS. When you make updates to your server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.

" + "documentation":"

Instantiates an autoscaling virtual server based on the selected file transfer protocol in AWS. When you make updates to your file transfer protocol-enabled server or when you work with users, use the service-generated ServerId property that is assigned to the newly created server.

" }, "CreateUser":{ "name":"CreateUser", @@ -45,7 +47,7 @@ {"shape":"ResourceExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a user and associates them with an existing Secure File Transfer Protocol (SFTP) server. You can only create and associate users with SFTP servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

" + "documentation":"

Creates a user and associates them with an existing file transfer protocol-enabled server. You can only create and associate users with servers that have the IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, you can specify the user name, set the home directory, store the user's public key, and assign the user's AWS Identity and Access Management (IAM) role. You can also optionally add a scope-down policy, and assign metadata with tags that can be used to group and search for users.

" }, "DeleteServer":{ "name":"DeleteServer", @@ -55,12 +57,13 @@ }, "input":{"shape":"DeleteServerRequest"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the Secure File Transfer Protocol (SFTP) server that you specify.

No response returns from this operation.

" + "documentation":"

Deletes the file transfer protocol-enabled server that you specify.

No response returns from this operation.

" }, "DeleteSshPublicKey":{ "name":"DeleteSshPublicKey", @@ -91,7 +94,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the user belonging to the server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

" + "documentation":"

Deletes the user belonging to a file transfer protocol-enabled server you specify.

No response returns from this operation.

When you delete a user from a server, the user's information is lost.

" }, "DescribeServer":{ "name":"DescribeServer", @@ -107,7 +110,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the server that you specify by passing the ServerId parameter.

The response contains a description of the server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.

" + "documentation":"

Describes a file transfer protocol-enabled server that you specify by passing the ServerId parameter.

The response contains a description of a server's properties. When you set EndpointType to VPC, the response will contain the EndpointDetails.

" }, "DescribeUser":{ "name":"DescribeUser", @@ -123,7 +126,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Describes the user assigned to a specific server, as identified by its ServerId property.

The response from this call returns the properties of the user associated with the ServerId value that was specified.

" + "documentation":"

Describes the user assigned to the specific file transfer protocol-enabled server, as identified by its ServerId property.

The response from this call returns the properties of the user associated with the ServerId value that was specified.

" }, "ImportSshPublicKey":{ "name":"ImportSshPublicKey", @@ -141,7 +144,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to a specific server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

" + "documentation":"

Adds a Secure Shell (SSH) public key to a user account identified by a UserName value assigned to the specific file transfer protocol-enabled server, identified by ServerId.

The response returns the UserName value, the ServerId value, and the name of the SshPublicKeyId.

" }, "ListServers":{ "name":"ListServers", @@ -157,7 +160,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Lists the Secure File Transfer Protocol (SFTP) servers that are associated with your AWS account.

" + "documentation":"

Lists the file transfer protocol-enabled servers that are associated with your AWS account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -190,7 +193,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists the users for the server that you specify by passing the ServerId parameter.

" + "documentation":"

Lists the users for a file transfer protocol-enabled server that you specify by passing the ServerId parameter.

" }, "StartServer":{ "name":"StartServer", @@ -206,7 +209,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE to ONLINE. It has no impact on an SFTP server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.

The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.

No response is returned from this call.

" + "documentation":"

Changes the state of a file transfer protocol-enabled server from OFFLINE to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE server can accept and process file transfer jobs.

The state of STARTING indicates that the server is in an intermediate state, either not fully able to respond, or not fully online. The values of START_FAILED can indicate an error condition.

No response is returned from this call.

" }, "StopServer":{ "name":"StopServer", @@ -222,7 +225,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server such as server and user properties are not affected by stopping your server. Stopping a server will not reduce or impact your Secure File Transfer Protocol (SFTP) endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

" + "documentation":"

Changes the state of a file transfer protocol-enabled server from ONLINE to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. Information tied to your server, such as server and user properties, are not affected by stopping your server. Stopping the server will not reduce or impact your file transfer protocol endpoint billing.

The state of STOPPING indicates that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of STOP_FAILED can indicate an error condition.

No response is returned from this call.

" }, "TagResource":{ "name":"TagResource", @@ -253,7 +256,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

If the IdentityProviderType of the server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.

" + "documentation":"

If the IdentityProviderType of a file transfer protocol-enabled server is API_Gateway, tests whether your API Gateway is set up successfully. We highly recommend that you call this operation to test your authentication method as soon as you create your server. By doing so, you can troubleshoot issues with the API Gateway integration to ensure that your users can successfully use the service.

" }, "UntagResource":{ "name":"UntagResource", @@ -279,6 +282,7 @@ "input":{"shape":"UpdateServerRequest"}, "output":{"shape":"UpdateServerResponse"}, "errors":[ + {"shape":"AccessDeniedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ConflictException"}, {"shape":"InternalServiceError"}, @@ -287,7 +291,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates the server properties after that server has been created.

The UpdateServer call returns the ServerId of the Secure File Transfer Protocol (SFTP) server you updated.

" + "documentation":"

Updates the file transfer protocol-enabled server's properties after that server has been created.

The UpdateServer call returns the ServerId of the server you updated.

" }, "UpdateUser":{ "name":"UpdateUser", @@ -308,6 +312,15 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ServiceErrorMessage"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "exception":true, + "synthetic":true + }, "AddressAllocationId":{"type":"string"}, "AddressAllocationIds":{ "type":"list", @@ -319,45 +332,57 @@ "min":20, "pattern":"arn:.*" }, + "Certificate":{ + "type":"string", + "max":1600 + }, "ConflictException":{ "type":"structure", "required":["Message"], "members":{ "Message":{"shape":"Message"} }, - "documentation":"

This exception is thrown when the UpdatServer is called for a server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", + "documentation":"

This exception is thrown when the UpdateServer is called for a file transfer protocol-enabled server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", "exception":true }, "CreateServerRequest":{ "type":"structure", "members":{ + "Certificate":{ + "shape":"Certificate", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

To request a new public certificate, see Request a public certificate in the AWS Certificate Manager User Guide.

To import an existing certificate into ACM, see Importing certificates into ACM in the AWS Certificate Manager User Guide.

To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the AWS Certificate Manager User Guide.

Certificates with the following cryptographic algorithms and key sizes are supported:

  • 2048-bit RSA (RSA_2048)

  • 4096-bit RSA (RSA_4096)

  • Elliptic Prime Curve 256 bit (EC_prime256v1)

  • Elliptic Prime Curve 384 bit (EC_secp384r1)

  • Elliptic Prime Curve 521 bit (EC_secp521r1)

The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.

" + }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. When you host your endpoint within your VPC, you can make it accessible only to resources within your VPC, or you can attach Elastic IPs and make it accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC.

" + "documentation":"

The type of VPC endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by the ssh-keygen -N \"\" -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key\" in the AWS SFTP User Guide.

" + "documentation":"

The RSA private key as generated by the ssh-keygen -N \"\" -m PEM -f my-new-server-key command.

If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

This parameter is required when the IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. This property is not required when the IdentityProviderType is set to SERVICE_MANAGED.

" + "documentation":"

Required when IdentityProviderType is set to API_GATEWAY. Accepts an array containing all of the information required to call a customer-supplied authentication API, including the API Gateway URL. Not required when IdentityProviderType is set to SERVICE_MANAGED.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

Specifies the mode of authentication for the SFTP server. The default value is SERVICE_MANAGED, which allows you to store and access SFTP user credentials within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

" + "documentation":"

Specifies the mode of authentication for a file transfer protocol-enabled server. The default value is SERVICE_MANAGED, which allows you to store and access user credentials within the AWS Transfer Family service. Use the API_GATEWAY value to integrate with an identity provider of your choosing. The API_GATEWAY setting requires you to provide an API Gateway endpoint URL to call for authentication using the IdentityProviderDetails parameter.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

A value that allows the service to write your SFTP users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.

" + "documentation":"

Allows the service to write your users' activity to your Amazon CloudWatch logs for monitoring and auditing purposes.

" + }, + "Protocols":{ + "shape":"Protocols", + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.

" }, "Tags":{ "shape":"Tags", - "documentation":"

Key-value pairs that can be used to group and search for servers.

" + "documentation":"

Key-value pairs that can be used to group and search for file transfer protocol-enabled servers.

" } } }, @@ -367,7 +392,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

The service-assigned ID of the SFTP server that is created.

" + "documentation":"

The service-assigned ID of the file transfer protocol-enabled server that is created.

" } } }, @@ -381,31 +406,31 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

The landing directory (folder) for a user when they log in to the server using their SFTP client.

An example is <your-Amazon-S3-bucket-name>/home/username.

" + "documentation":"

The landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using the client.

An example is your-Amazon-S3-bucket-name/home/username.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the SFTP server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

" + "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your IAM role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a '/' for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", - "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\">Creating a Scope-Down Policy.

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.

" + "documentation":"

A scope-down policy for your user so you can use the same IAM role across multiple users. This policy scopes down user access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a scope-down policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" }, "Role":{ "shape":"Role", - "documentation":"

The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

" + "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server instance. This is the specific SFTP server that you added your user to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance. This is the specific server that you added your user to.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", - "documentation":"

The public portion of the Secure Shell (SSH) key used to authenticate the user to the SFTP server.

" + "documentation":"

The public portion of the Secure Shell (SSH) key used to authenticate the user to the file transfer protocol-enabled server.

" }, "Tags":{ "shape":"Tags", @@ -413,7 +438,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" + "documentation":"

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This user name must be a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" } } }, @@ -426,11 +451,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

The ID of the SFTP server that the user is attached to.

" + "documentation":"

The ID of the file transfer protocol-enabled server that the user is attached to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user account associated with an SFTP server.

" + "documentation":"

A unique string that identifies a user account associated with a file transfer protocol-enabled server.

" } } }, @@ -441,7 +466,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A unique system-assigned identifier for an SFTP server instance.

" + "documentation":"

A unique system-assigned identifier for a file transfer protocol-enabled server instance.

" } } }, @@ -455,7 +480,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server instance that has the user assigned to it.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.

" }, "SshPublicKeyId":{ "shape":"SshPublicKeyId", @@ -476,11 +501,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server instance that has the user assigned to it.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that has the user assigned to it.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user that is being deleted from the server.

" + "documentation":"

A unique string that identifies a user that is being deleted from a file transfer protocol-enabled server.

" } } }, @@ -490,7 +515,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" } } }, @@ -500,7 +525,7 @@ "members":{ "Server":{ "shape":"DescribedServer", - "documentation":"

An array containing the properties of the server with the ServerID you specified.

" + "documentation":"

An array containing the properties of a file transfer protocol-enabled server with the ServerID you specified.

" } } }, @@ -513,11 +538,11 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that has this user assigned.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user assigned to one or more servers. User names are part of the sign-in credentials to use the AWS Transfer for SFTP service and perform file transfer tasks.

" + "documentation":"

The name of the user assigned to one or more file transfer protocol-enabled servers. User names are part of the sign-in credentials to use the AWS Transfer Family service and perform file transfer tasks.

" } } }, @@ -530,7 +555,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that has this user assigned.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has this user assigned.

" }, "User":{ "shape":"DescribedUser", @@ -544,50 +569,58 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

Specifies the unique Amazon Resource Name (ARN) for the server to be described.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be described.

" + }, + "Certificate":{ + "shape":"Certificate", + "documentation":"

Specifies the ARN of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

" }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that you configured for your SFTP server.

" + "documentation":"

Specifies the virtual private cloud (VPC) endpoint settings that you configured for your file transfer protocol-enabled server.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Defines the type of endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "HostKeyFingerprint":{ "shape":"HostKeyFingerprint", - "documentation":"

This value contains the message-digest algorithm (MD5) hash of the server's host key. This value is equivalent to the output of the ssh-keygen -l -E md5 -f my-new-server-key command.

" + "documentation":"

Specifies the Base64-encoded SHA256 fingerprint of the server's host key. This value is equivalent to the output of the ssh-keygen -l -f my-new-server-key command.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of the server is SERVICE_MANAGED>.

" + "documentation":"

Specifies information to call a customer-supplied authentication API. This field is not populated when the IdentityProviderType of a file transfer protocol-enabled server is SERVICE_MANAGED.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

This property defines the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this server to store and access SFTP user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" + "documentation":"

Specifies the mode of authentication method enabled for this service. A value of SERVICE_MANAGED means that you are using this file transfer protocol-enabled server to store and access user credentials within the service. A value of API_GATEWAY indicates that you have integrated an API Gateway endpoint that will be invoked for authenticating your user into the service.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

This property is an AWS Identity and Access Management (IAM) entity that allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, user activity can be viewed in your CloudWatch logs.

" + }, + "Protocols":{ + "shape":"Protocols", + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH

  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption

  • FTP (File Transfer Protocol): Unencrypted file transfer

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

This property is a unique system-assigned identifier for the SFTP server that you instantiate.

" + "documentation":"

Specifies the unique system-assigned identifier for a file transfer protocol-enabled server that you instantiate.

" }, "State":{ "shape":"State", - "documentation":"

The condition of the SFTP server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "Tags":{ "shape":"Tags", - "documentation":"

This property contains the key-value pairs that you can use to search for and group servers that were assigned to the server that was described.

" + "documentation":"

Specifies the key-value pairs that you can use to search for and group file transfer protocol-enabled servers that were assigned to the server that was described.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

The number of users that are assigned to the SFTP server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" } }, - "documentation":"

Describes the properties of the server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the authentication configuration and type, the logging role, the server ID and state, and assigned tags or metadata.

" + "documentation":"

Describes the properties of a file transfer protocol-enabled server that was specified. Information returned includes the following: the server Amazon Resource Name (ARN), the certificate ARN (if the FTPS protocol was selected), the endpoint type and details, the authentication configuration and type, the logging role, the file transfer protocol or protocols, the server ID and state, and assigned tags or metadata.

" }, "DescribedUser":{ "type":"structure", @@ -595,19 +628,19 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

This property contains the unique Amazon Resource Name (ARN) for the user that was requested to be described.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for the user that was requested to be described.

" }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

This property specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket for the described user. An example is /your s3 bucket name/home/username .

" + "documentation":"

Specifies the landing directory (or folder), which is the location that files are written to or read from in an Amazon S3 bucket, for the described user. An example is your-Amazon-S3-bucket-name/home/username .

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that you specified for what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target.

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" + "documentation":"

Specifies the logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS Identity and Access Management (IAM) role provides access to paths in Target.

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you mapped for your users' to see when they log into the SFTP server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

" + "documentation":"

Specifies the type of landing directory (folder) you mapped for your users to see when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "Policy":{ "shape":"Policy", @@ -615,19 +648,19 @@ }, "Role":{ "shape":"Role", - "documentation":"

This property specifies the IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the SFTP server to access your resources when servicing your SFTP user's transfer requests.

" + "documentation":"

Specifies the IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows a file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" }, "SshPublicKeys":{ "shape":"SshPublicKeys", - "documentation":"

This property contains the public key portion of the Secure Shell (SSH) keys stored for the described user.

" + "documentation":"

Specifies the public key portion of the Secure Shell (SSH) keys stored for the described user.

" }, "Tags":{ "shape":"Tags", - "documentation":"

This property contains the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

" + "documentation":"

Specifies the key-value pairs for the user requested. Tag can be used to search for and group users for a variety of purposes.

" }, "UserName":{ "shape":"UserName", - "documentation":"

This property is the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your SFTP server.

" + "documentation":"

Specifies the name of the user that was requested to be described. User names are used for authentication purposes. This is the string that will be used by your user when they log in to your file transfer protocol-enabled server.

" } }, "documentation":"

Returns properties of the user that you want to describe.

" @@ -637,22 +670,22 @@ "members":{ "AddressAllocationIds":{ "shape":"AddressAllocationIds", - "documentation":"

A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This is only valid in the UpdateServer API.

This property can only be use when EndpointType is set to VPC.

" + "documentation":"

A list of address allocation IDs that are required to attach an Elastic IP address to your file transfer protocol-enabled server's endpoint. This is only valid in the UpdateServer API.

This property can only be used when EndpointType is set to VPC.

" }, "SubnetIds":{ "shape":"SubnetIds", - "documentation":"

A list of subnet IDs that are required to host your SFTP server endpoint in your VPC.

" + "documentation":"

A list of subnet IDs that are required to host your file transfer protocol-enabled server endpoint in your VPC.

This property can only be used when EndpointType is set to VPC.

" }, "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the VPC endpoint.

" + "documentation":"

The ID of the VPC endpoint.

This property can only be used when EndpointType is set to VPC_ENDPOINT.

" }, "VpcId":{ "shape":"VpcId", - "documentation":"

The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted.

" + "documentation":"

The VPC ID of the VPC in which a file transfer protocol-enabled server's endpoint will be hosted.

This property can only be used when EndpointType is set to VPC.

" } }, - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP to your server's endpoint.

" }, "EndpointType":{ "type":"string", @@ -709,18 +742,18 @@ "members":{ "Url":{ "shape":"Url", - "documentation":"

The Url parameter provides contains the location of the service endpoint used to authenticate users.

" + "documentation":"

Provides the location of the service endpoint used to authenticate users.

" }, "InvocationRole":{ "shape":"Role", - "documentation":"

The InvocationRole parameter provides the type of InvocationRole used to authenticate the user account.

" + "documentation":"

Provides the type of InvocationRole used to authenticate the user account.

" } }, - "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. A server can have only one method of authentication.

" + "documentation":"

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. A server can have only one method of authentication.

" }, "IdentityProviderType":{ "type":"string", - "documentation":"

Returns information related to the type of user authentication that is in use for a server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on an SFTP server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. A server can have only one method of authentication.

", + "documentation":"

Returns information related to the type of user authentication that is in use for a file transfer protocol-enabled server's users. For SERVICE_MANAGED authentication, the Secure Shell (SSH) public keys are stored with a user on the server instance. For API_GATEWAY authentication, your custom authentication method is implemented by using an API call. The server can have only one method of authentication.

", "enum":[ "SERVICE_MANAGED", "API_GATEWAY" @@ -736,7 +769,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", @@ -744,7 +777,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user account that is assigned to one or more servers.

" + "documentation":"

The name of the user account that is assigned to one or more file transfer protocol-enabled servers.

" } } }, @@ -758,18 +791,18 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server.

" }, "SshPublicKeyId":{ "shape":"SshPublicKeyId", - "documentation":"

This identifier is the name given to a public key by the system that was imported.

" + "documentation":"

The name given to a public key by the system that was imported.

" }, "UserName":{ "shape":"UserName", "documentation":"

A user name assigned to the ServerID value that you specified.

" } }, - "documentation":"

This response identifies the user, the server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

" + "documentation":"

Identifies the user, the file transfer protocol-enabled server they belong to, and the identifier of the SSH public key associated with that user. A user can have more than one key on each server that they are associated with.

" }, "InternalServiceError":{ "type":"structure", @@ -777,7 +810,7 @@ "members":{ "Message":{"shape":"Message"} }, - "documentation":"

This exception is thrown when an error occurs in the AWS Transfer for SFTP service.

", + "documentation":"

This exception is thrown when an error occurs in the AWS Transfer Family service.

", "exception":true, "fault":true }, @@ -804,11 +837,11 @@ "members":{ "MaxResults":{ "shape":"MaxResults", - "documentation":"

Specifies the number of servers to return as a response to the ListServers query.

" + "documentation":"

Specifies the number of file transfer protocol-enabled servers to return as a response to the ListServers query.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional servers.

" + "documentation":"

When additional results are obtained from the ListServers command, a NextToken parameter is returned in the output. You can then pass the NextToken parameter in a subsequent command to continue listing additional file transfer protocol-enabled servers.

" } } }, @@ -818,11 +851,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional servers.

" + "documentation":"

When you can get additional results from the ListServers operation, a NextToken parameter is returned in the output. In a following command, you can pass in the NextToken parameter to continue listing additional file transfer protocol-enabled servers.

" }, "Servers":{ "shape":"ListedServers", - "documentation":"

An array of servers that were listed.

" + "documentation":"

An array of file transfer protocol-enabled servers that were listed.

" } } }, @@ -849,7 +882,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

This value is the ARN you specified to list the tags of.

" + "documentation":"

The ARN you specified to list the tags of.

" }, "NextToken":{ "shape":"NextToken", @@ -875,7 +908,7 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) server that has users assigned to it.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that has users assigned to it.

" } } }, @@ -892,7 +925,7 @@ }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that the users are assigned to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that the users are assigned to.

" }, "Users":{ "shape":"ListedUsers", @@ -906,34 +939,34 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

The unique Amazon Resource Name (ARN) for the server to be listed.

" + "documentation":"

Specifies the unique Amazon Resource Name (ARN) for a file transfer protocol-enabled server to be listed.

" }, "IdentityProviderType":{ "shape":"IdentityProviderType", - "documentation":"

The authentication method used to validate a user for the server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" + "documentation":"

Specifies the authentication method used to validate a user for a file transfer protocol-enabled server that was specified. This can include Secure Shell (SSH), user name and password combinations, or your own custom authentication method. Valid values include SERVICE_MANAGED or API_GATEWAY.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of VPC endpoint that your SFTP server is connected to. If your SFTP server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" + "documentation":"

Specifies the type of VPC endpoint that your file transfer protocol-enabled server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet.

" }, "LoggingRole":{ "shape":"Role", - "documentation":"

The AWS Identity and Access Management entity that allows the server to turn on Amazon CloudWatch logging.

" + "documentation":"

Specifies the AWS Identity and Access Management (IAM) role that allows a file transfer protocol-enabled server to turn on Amazon CloudWatch logging.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

This value is the unique system assigned identifier for the SFTP servers that were listed.

" + "documentation":"

Specifies the unique system-assigned identifier for a file transfer protocol-enabled server that was listed.

" }, "State":{ "shape":"State", - "documentation":"

This property describes the condition of the SFTP server for the server that was described. A value of ONLINE> indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" + "documentation":"

Specifies the condition of a file transfer protocol-enabled server for the server that was described. A value of ONLINE indicates that the server can accept jobs and transfer files. A State value of OFFLINE means that the server cannot perform file transfer operations.

The states of STARTING and STOPPING indicate that the server is in an intermediate state, either not fully able to respond, or not fully offline. The values of START_FAILED or STOP_FAILED can indicate an error condition.

" }, "UserCount":{ "shape":"UserCount", - "documentation":"

This property is a numeric value that indicates the number of users that are assigned to the SFTP server you specified with the ServerId.

" + "documentation":"

Specifies the number of users that are assigned to a file transfer protocol-enabled server you specified with the ServerId.

" } }, - "documentation":"

Returns properties of the server that was specified.

" + "documentation":"

Returns properties of a file transfer protocol-enabled server that was specified.

" }, "ListedServers":{ "type":"list", @@ -945,27 +978,27 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

This property is the unique Amazon Resource Name (ARN) for the user that you want to learn about.

" + "documentation":"

Provides the unique Amazon Resource Name (ARN) for the user that you want to learn about.

" }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

This value specifies the location that files are written to or read from an Amazon S3 bucket for the user you specify by their ARN.

" + "documentation":"

Specifies the location that files are written to or read from an Amazon S3 bucket for the user you specify by their ARN.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

" + "documentation":"

Specifies the type of landing directory (folder) you mapped for your users' home directory. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "Role":{ "shape":"Role", - "documentation":"

The role in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows the SFTP server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" + "documentation":"

Specifies the role that is in use by this user. A role is an AWS Identity and Access Management (IAM) entity that, in this case, allows a file transfer protocol-enabled server to act on a user's behalf. It allows the server to inherit the trust relationship that enables that user to perform file operations to their Amazon S3 bucket.

" }, "SshPublicKeyCount":{ "shape":"SshPublicKeyCount", - "documentation":"

This value is the number of SSH public keys stored for the user you specified.

" + "documentation":"

Specifies the number of SSH public keys stored for the user you specified.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The name of the user whose ARN was specified. User names are used for authentication purposes.

" + "documentation":"

Specifies the name of the user whose ARN was specified. User names are used for authentication purposes.

" } }, "documentation":"

Returns properties of the user that you specify.

" @@ -1004,6 +1037,20 @@ "type":"string", "max":2048 }, + "Protocol":{ + "type":"string", + "enum":[ + "SFTP", + "FTP", + "FTPS" + ] + }, + "Protocols":{ + "type":"list", + "member":{"shape":"Protocol"}, + "max":3, + "min":1 + }, "Resource":{"type":"string"}, "ResourceExistsException":{ "type":"structure", @@ -1032,7 +1079,7 @@ "Resource":{"shape":"Resource"}, "ResourceType":{"shape":"ResourceType"} }, - "documentation":"

This exception is thrown when a resource is not found by the AWS Transfer for SFTP service.

", + "documentation":"

This exception is thrown when a resource is not found by the AWS Transfer Family service.

", "exception":true }, "ResourceType":{"type":"string"}, @@ -1056,11 +1103,16 @@ "members":{ "Message":{"shape":"ServiceErrorMessage"} }, - "documentation":"

The request has failed because the AWS Transfer for SFTP service is not available.

", + "documentation":"

The request has failed because the AWS Transfer Family service is not available.

", "exception":true, "fault":true, "synthetic":true }, + "SourceIp":{ + "type":"string", + "max":32, + "pattern":"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" + }, "SshPublicKey":{ "type":"structure", "required":[ @@ -1071,18 +1123,18 @@ "members":{ "DateImported":{ "shape":"DateImported", - "documentation":"

The date that the public key was added to the user account.

" + "documentation":"

Specifies the date that the public key was added to the user account.

" }, "SshPublicKeyBody":{ "shape":"SshPublicKeyBody", - "documentation":"

The content of the SSH public key as specified by the PublicKeyId.

" + "documentation":"

Specifies the content of the SSH public key as specified by the PublicKeyId.

" }, "SshPublicKeyId":{ "shape":"SshPublicKeyId", - "documentation":"

The SshPublicKeyId parameter contains the identifier of the public key.

" + "documentation":"

Specifies the SshPublicKeyId parameter, which contains the identifier of the public key.

" } }, - "documentation":"

Provides information about the public Secure Shell (SSH) key that is associated with a user account for a specific server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific SFTP server.

" + "documentation":"

Provides information about the public Secure Shell (SSH) key that is associated with a user account for the specific file transfer protocol-enabled server (as identified by ServerId). The information returned includes the date the key was imported, the public key contents, and the public key ID. A user can store more than one SSH public key associated with their user name on a specific server.

" }, "SshPublicKeyBody":{ "type":"string", @@ -1107,13 +1159,13 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that you start.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that you start.

" } } }, "State":{ "type":"string", - "documentation":"

Describes the condition of the SFTP server with respect to its ability to perform file operations. There are six possible states: OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED.

OFFLINE indicates that the SFTP server exists, but that it is not available for file operations. ONLINE indicates that the SFTP server is available to perform file operations. STARTING indicates that the SFTP server's was instantiated, but the server is not yet available to perform file operations. Under normal conditions, it can take a couple of minutes for an SFTP server to be completely operational. Both START_FAILED and STOP_FAILED are error conditions.

", + "documentation":"

Describes the condition of a file transfer protocol-enabled server with respect to its ability to perform file operations. There are six possible states: OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED.

OFFLINE indicates that the server exists, but that it is not available for file operations. ONLINE indicates that the server is available to perform file operations. STARTING indicates that the server was instantiated, but the server is not yet available to perform file operations. Under normal conditions, it can take a couple of minutes for the server to be completely operational. Both START_FAILED and STOP_FAILED are error conditions.

", "enum":[ "OFFLINE", "ONLINE", @@ -1130,7 +1182,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that you stopped.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that you stopped.

" } } }, @@ -1152,7 +1204,7 @@ }, "Value":{ "shape":"TagValue", - "documentation":"

This property contains one or more values that you assigned to the key name you create.

" + "documentation":"

Contains one or more values that you assigned to the key name you create.

" } }, "documentation":"

Creates a key-value pair for a specific resource. Tags are metadata that you can use to search for and group a resource for various purposes. You can apply tags to servers, users, and roles. A tag key can take more than one value. For example, to group servers for accounting purposes, you might create a tag called Group and assign the values Research and Accounting to that group.

" @@ -1203,11 +1255,19 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned identifier for a specific server. That server's user authentication method is tested with a user name and password.

" + "documentation":"

A system-assigned identifier for a specific file transfer protocol-enabled server. That server's user authentication method is tested with a user name and password.

" + }, + "ServerProtocol":{ + "shape":"Protocol", + "documentation":"

The type of file transfer protocol to be tested.

The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP)

  • File Transfer Protocol Secure (FTPS)

  • File Transfer Protocol (FTP)

" + }, + "SourceIp":{ + "shape":"SourceIp", + "documentation":"

The source IP address of the user account to be tested.

" }, "UserName":{ "shape":"UserName", - "documentation":"

This request parameter is the name of the user account to be tested.

" + "documentation":"

The name of the user account to be tested.

" }, "UserPassword":{ "shape":"UserPassword", @@ -1232,7 +1292,7 @@ }, "Message":{ "shape":"Message", - "documentation":"

A message that indicates whether the test was successful or not.

" + "documentation":"

A message that indicates whether the test was successful or not.

" }, "Url":{ "shape":"Url", @@ -1257,7 +1317,7 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

This is the value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.

" + "documentation":"

The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.

" }, "TagKeys":{ "shape":"TagKeys", @@ -1269,29 +1329,37 @@ "type":"structure", "required":["ServerId"], "members":{ + "Certificate":{ + "shape":"Certificate", + "documentation":"

The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when Protocols is set to FTPS.

To request a new public certificate, see Request a public certificate in the AWS Certificate Manager User Guide.

To import an existing certificate into ACM, see Importing certificates into ACM in the AWS Certificate Manager User Guide.

To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the AWS Certificate Manager User Guide.

Certificates with the following cryptographic algorithms and key sizes are supported:

  • 2048-bit RSA (RSA_2048)

  • 4096-bit RSA (RSA_4096)

  • Elliptic Prime Curve 256 bit (EC_prime256v1)

  • Elliptic Prime Curve 384 bit (EC_secp384r1)

  • Elliptic Prime Curve 521 bit (EC_secp521r1)

The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.

" + }, "EndpointDetails":{ "shape":"EndpointDetails", - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your SFTP server. With a VPC endpoint, you can restrict access to your SFTP server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server to resources only within your VPC. To control incoming internet traffic, you will need to associate one or more Elastic IP addresses with your server's endpoint.

" }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that you want your SFTP server to connect to. You can choose to connect to the public internet or a virtual private cloud (VPC) endpoint. With a VPC endpoint, your SFTP server isn't accessible over the public internet.

" + "documentation":"

The type of endpoint that you want your file transfer protocol-enabled server to connect to. You can choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, you can restrict access to your server and resources only within your VPC.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "HostKey":{ "shape":"HostKey", - "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -f my-new-server-key.

If you aren't planning to migrate existing users from an existing SFTP server to a new AWS SFTP server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see \"https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key\" in the AWS SFTP User Guide.

" + "documentation":"

The RSA private key as generated by ssh-keygen -N \"\" -m PEM -f my-new-server-key.

If you aren't planning to migrate existing users from an existing file transfer protocol-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive.

For more information, see Change the host key for your SFTP-enabled server in the AWS Transfer Family User Guide.

" }, "IdentityProviderDetails":{ "shape":"IdentityProviderDetails", - "documentation":"

This response parameter is an array containing all of the information required to call a customer's authentication API method.

" + "documentation":"

An array containing all of the information required to call a customer's authentication API method.

" }, "LoggingRole":{ "shape":"NullableRole", - "documentation":"

A value that changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.

" + "documentation":"

Changes the AWS Identity and Access Management (IAM) role that allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging on or off.

" + }, + "Protocols":{ + "shape":"Protocols", + "documentation":"

Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:

  • Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over SSH

  • File Transfer Protocol Secure (FTPS): File transfer with TLS encryption

  • File Transfer Protocol (FTP): Unencrypted file transfer

If you select FTPS, you must choose a certificate stored in AWS Certificate Manager (ACM) which will be used to identify your server when clients connect to it over FTPS.

If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY.

If Protocol includes FTP, then AddressAllocationIds cannot be associated.

If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to SERVICE_MANAGED.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" } } }, @@ -1301,7 +1369,7 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server that the user account is assigned to.

" } } }, @@ -1314,31 +1382,31 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

A parameter that specifies the landing directory (folder) for a user when they log in to the server using their client.

An example is <your-Amazon-S3-bucket-name>/home/username.

" + "documentation":"

Specifies the landing directory (folder) for a user when they log in to the file transfer protocol-enabled server using their file transfer protocol client.

An example is your-Amazon-S3-bucket-name/home/username.

" }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", - "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the SFTP serve. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make S3 paths visible to your user.

" + "documentation":"

The type of landing directory (folder) you want your users' home directory to be when they log into the file transfer protocol-enabled server. If you set it to PATH, the user will see the absolute Amazon S3 bucket paths as is in their file transfer protocol clients. If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings for how you want to make Amazon S3 paths visible to your users.

" }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your AWS IAM Role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in S3, the entry will be ignored. As a workaround, you can use the S3 api to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 paths and keys should be visible to your user and how you want to make them visible. You will need to specify the \"Entry\" and \"Target\" pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 path. If you only specify a target, it will be displayed as is. You will need to also make sure that your IAM role provides access to paths in Target. The following is an example.

'[ \"/bucket2/documentation\", { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]'

In most cases, you can use this value instead of the scope-down policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

If the target of a logical directory entry does not exist in Amazon S3, the entry will be ignored. As a workaround, you can use the Amazon S3 API to create 0 byte objects as placeholders for your directory. If using the CLI, use the s3api call instead of s3 so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", - "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same AWS Identity and Access Management (IAM) role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see \"https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\">Creating a Scope-Down Policy.

For more information, see \"https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\" in the AWS Security Token Service API Reference.

" + "documentation":"

Allows you to supply a scope-down policy for your user so you can use the same IAM role across multiple users. The policy scopes down user access to portions of your Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

For scope-down policies, AWS Transfer Family stores the policy as a JSON blob, instead of the Amazon Resource Name (ARN) of the policy. You save the policy as a JSON blob and pass it in the Policy argument.

For an example of a scope-down policy, see Creating a scope-down policy.

For more information, see AssumeRole in the AWS Security Token Service API Reference.

" }, "Role":{ "shape":"Role", - "documentation":"

The IAM role that controls your user's access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the Secure File Transfer Protocol (SFTP) server to access your resources when servicing your SFTP user's transfer requests.

" + "documentation":"

The IAM role that controls your users' access to your Amazon S3 bucket. The policies attached to this role will determine the level of access you want to provide your users when transferring files into and out of your Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship that allows the file transfer protocol-enabled server to access your resources when servicing your users' transfer requests.

" }, "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

A unique string that identifies a user and is associated with a server as specified by the ServerId. This is the string that will be used by your user when they log in to your SFTP server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" + "documentation":"

A unique string that identifies a user and is associated with a file transfer protocol-enabled server as specified by the ServerId. This is the string that will be used by your user when they log in to your server. This user name is a minimum of 3 and a maximum of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen.

" } } }, @@ -1351,14 +1419,14 @@ "members":{ "ServerId":{ "shape":"ServerId", - "documentation":"

A system-assigned unique identifier for an SFTP server instance that the user account is assigned to.

" + "documentation":"

A system-assigned unique identifier for a file transfer protocol-enabled server instance that the user account is assigned to.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The unique identifier for a user that is assigned to the SFTP server instance that was specified in the request.

" + "documentation":"

The unique identifier for a user that is assigned to a file transfer protocol-enabled server instance that was specified in the request.

" } }, - "documentation":"

UpdateUserResponse returns the user name and server identifier for the request to update a user's properties.

" + "documentation":"

UpdateUserResponse returns the user name and file transfer protocol-enabled server identifier for the request to update a user's properties.

" }, "Url":{ "type":"string", @@ -1384,5 +1452,5 @@ }, "VpcId":{"type":"string"} }, - "documentation":"

AWS Transfer for SFTP is a fully managed service that enables the transfer of files directly into and out of Amazon S3 using the Secure File Transfer Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer for SFTP—by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53—so nothing changes for your customers and partners, or their applications. With your data in S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no infrastructure to buy and set up.

" + "documentation":"

AWS Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3). AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with AWS services for processing, analytics, machine learning, and archiving. Getting started with AWS Transfer Family is easy since there is no infrastructure to buy and set up.

" } diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 805953069fd6..cea52b3ac5bf 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 translate diff --git a/services/waf/pom.xml b/services/waf/pom.xml index f7ffdc5ab891..4ad5ef58793f 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/waf/src/main/resources/codegen-resources/waf/service-2.json b/services/waf/src/main/resources/codegen-resources/waf/service-2.json index e3abe89679b3..1f60141e6aea 100644 --- a/services/waf/src/main/resources/codegen-resources/waf/service-2.json +++ b/services/waf/src/main/resources/codegen-resources/waf/service-2.json @@ -29,7 +29,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.

  2. Submit a CreateByteMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.

  2. Submit a CreateByteMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateGeoMatchSet":{ "name":"CreateGeoMatchSet", @@ -47,7 +47,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an GeoMatchSet, which you use to specify which web requests you want to allow or block based on the country that the requests originate from. For example, if you're receiving a lot of requests from one or more countries and you want to block the requests, you can create an GeoMatchSet that contains those countries and then configure AWS WAF to block the requests.

To create and configure a GeoMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateGeoMatchSet request.

  2. Submit a CreateGeoMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  4. Submit an UpdateGeoMatchSetSet request to specify the countries that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an GeoMatchSet, which you use to specify which web requests you want to allow or block based on the country that the requests originate from. For example, if you're receiving a lot of requests from one or more countries and you want to block the requests, you can create an GeoMatchSet that contains those countries and then configure AWS WAF to block the requests.

To create and configure a GeoMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateGeoMatchSet request.

  2. Submit a CreateGeoMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  4. Submit an UpdateGeoMatchSetSet request to specify the countries that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateIPSet":{ "name":"CreateIPSet", @@ -65,7 +65,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an IPSet, which you use to specify which web requests that you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.

  2. Submit a CreateIPSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an IPSet, which you use to specify which web requests that you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.

  2. Submit a CreateIPSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRateBasedRule":{ "name":"CreateRateBasedRule", @@ -85,7 +85,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The RateBasedRule also contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to count or block if these requests exceed the RateLimit.

If you add more than one predicate to a RateBasedRule, a request not only must exceed the RateLimit, but it also must match all the specifications to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 15,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 15,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 15,000 for a five-minute period, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 15,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

To create and configure a RateBasedRule, perform the following steps:

  1. Create and update the predicates that you want to include in the rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRateBasedRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRateBasedRule request to specify the predicates that you want to include in the rule.

  6. Create and update a WebACL that contains the RateBasedRule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The RateBasedRule also contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to count or block if these requests exceed the RateLimit.

If you add more than one predicate to a RateBasedRule, a request not only must exceed the RateLimit, but it also must match all the conditions to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 1,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 1,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 1,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

To create and configure a RateBasedRule, perform the following steps:

  1. Create and update the predicates that you want to include in the rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRateBasedRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRateBasedRule request to specify the predicates that you want to include in the rule.

  6. Create and update a WebACL that contains the RateBasedRule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRegexMatchSet":{ "name":"CreateRegexMatchSet", @@ -101,7 +101,7 @@ {"shape":"WAFDisallowedNameException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a RegexMatchSet that contains a RegexMatchTuple that looks for any requests with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexMatchSet request.

  2. Submit a CreateRegexMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  4. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value, using a RegexPatternSet, that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a RegexMatchSet that contains a RegexMatchTuple that looks for any requests with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexMatchSet request.

  2. Submit a CreateRegexMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  4. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value, using a RegexPatternSet, that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRegexPatternSet":{ "name":"CreateRegexPatternSet", @@ -117,7 +117,7 @@ {"shape":"WAFDisallowedNameException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexPatternSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexPatternSet request.

  2. Submit a CreateRegexPatternSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  4. Submit an UpdateRegexPatternSet request to specify the string that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexPatternSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexPatternSet request.

  2. Submit a CreateRegexPatternSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  4. Submit an UpdateRegexPatternSet request to specify the string that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRule":{ "name":"CreateRule", @@ -137,7 +137,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose that you add the following to a Rule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.

  6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose that you add the following to a Rule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.

  6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRuleGroup":{ "name":"CreateRuleGroup", @@ -156,7 +156,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a RuleGroup. A rule group is a collection of predefined rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RuleGroup. A rule group is a collection of predefined rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateSizeConstraintSet":{ "name":"CreateSizeConstraintSet", @@ -174,7 +174,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.

  2. Submit a CreateSizeConstraintSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.

  2. Submit a CreateSizeConstraintSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateSqlInjectionMatchSet":{ "name":"CreateSqlInjectionMatchSet", @@ -192,7 +192,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.

  2. Submit a CreateSqlInjectionMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.

  4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.

  2. Submit a CreateSqlInjectionMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.

  4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateWebACL":{ "name":"CreateWebACL", @@ -213,7 +213,24 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

To create and configure a WebACL, perform the following steps:

  1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.

  4. Submit a CreateWebACL request.

  5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

To create and configure a WebACL, perform the following steps:

  1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.

  4. Submit a CreateWebACL request.

  5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + }, + "CreateWebACLMigrationStack":{ + "name":"CreateWebACLMigrationStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWebACLMigrationStackRequest"}, + "output":{"shape":"CreateWebACLMigrationStackResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFEntityMigrationException"} + ], + "documentation":"

Creates an AWS CloudFormation WAFV2 template for the specified web ACL in the specified Amazon S3 bucket. Then, in CloudFormation, you create a stack from the template, to create the web ACL and its resources in AWS WAFV2. Use this to migrate your AWS WAF Classic web ACL to the latest version of AWS WAF.

This is part of a larger migration procedure for web ACLs from AWS WAF Classic to the latest version of AWS WAF. For the full procedure, including caveats and manual steps to complete the migration and switch over to the new web ACL, see Migrating your AWS WAF Classic resources to AWS WAF in the AWS WAF Developer Guide.

" }, "CreateXssMatchSet":{ "name":"CreateXssMatchSet", @@ -231,7 +248,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an XssMatchSet, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure an XssMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateXssMatchSet request.

  2. Submit a CreateXssMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.

  4. Submit an UpdateXssMatchSet request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an XssMatchSet, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure an XssMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateXssMatchSet request.

  2. Submit a CreateXssMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.

  4. Submit an UpdateXssMatchSet request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "DeleteByteMatchSet":{ "name":"DeleteByteMatchSet", @@ -249,7 +266,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

To permanently delete a ByteMatchSet, perform the following steps:

  1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.

  3. Submit a DeleteByteMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

To permanently delete a ByteMatchSet, perform the following steps:

  1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.

  3. Submit a DeleteByteMatchSet request.

" }, "DeleteGeoMatchSet":{ "name":"DeleteGeoMatchSet", @@ -267,7 +284,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's still used in any Rules or if it still includes any countries.

If you just want to remove a GeoMatchSet from a Rule, use UpdateRule.

To permanently delete a GeoMatchSet from AWS WAF, perform the following steps:

  1. Update the GeoMatchSet to remove any countries. For more information, see UpdateGeoMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteGeoMatchSet request.

  3. Submit a DeleteGeoMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's still used in any Rules or if it still includes any countries.

If you just want to remove a GeoMatchSet from a Rule, use UpdateRule.

To permanently delete a GeoMatchSet from AWS WAF, perform the following steps:

  1. Update the GeoMatchSet to remove any countries. For more information, see UpdateGeoMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteGeoMatchSet request.

  3. Submit a DeleteGeoMatchSet request.

" }, "DeleteIPSet":{ "name":"DeleteIPSet", @@ -285,7 +302,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" }, "DeleteLoggingConfiguration":{ "name":"DeleteLoggingConfiguration", @@ -300,7 +317,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFStaleDataException"} ], - "documentation":"

Permanently deletes the LoggingConfiguration from the specified web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes the LoggingConfiguration from the specified web ACL.

" }, "DeletePermissionPolicy":{ "name":"DeletePermissionPolicy", @@ -315,7 +332,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Permanently deletes an IAM policy from the specified RuleGroup.

The user making the request must be the owner of the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an IAM policy from the specified RuleGroup.

The user making the request must be the owner of the RuleGroup.

" }, "DeleteRateBasedRule":{ "name":"DeleteRateBasedRule", @@ -335,7 +352,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a RateBasedRule. You can't delete a rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a rule from a WebACL, use UpdateWebACL.

To permanently delete a RateBasedRule from AWS WAF, perform the following steps:

  1. Update the RateBasedRule to remove predicates, if any. For more information, see UpdateRateBasedRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRateBasedRule request.

  3. Submit a DeleteRateBasedRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RateBasedRule. You can't delete a rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a rule from a WebACL, use UpdateWebACL.

To permanently delete a RateBasedRule from AWS WAF, perform the following steps:

  1. Update the RateBasedRule to remove predicates, if any. For more information, see UpdateRateBasedRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRateBasedRule request.

  3. Submit a DeleteRateBasedRule request.

" }, "DeleteRegexMatchSet":{ "name":"DeleteRegexMatchSet", @@ -353,7 +370,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if it's still used in any Rules or if it still includes any RegexMatchTuples objects (any filters).

If you just want to remove a RegexMatchSet from a Rule, use UpdateRule.

To permanently delete a RegexMatchSet, perform the following steps:

  1. Update the RegexMatchSet to remove filters, if any. For more information, see UpdateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRegexMatchSet request.

  3. Submit a DeleteRegexMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if it's still used in any Rules or if it still includes any RegexMatchTuples objects (any filters).

If you just want to remove a RegexMatchSet from a Rule, use UpdateRule.

To permanently delete a RegexMatchSet, perform the following steps:

  1. Update the RegexMatchSet to remove filters, if any. For more information, see UpdateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRegexMatchSet request.

  3. Submit a DeleteRegexMatchSet request.

" }, "DeleteRegexPatternSet":{ "name":"DeleteRegexPatternSet", @@ -371,7 +388,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet if it's still used in any RegexMatchSet or if the RegexPatternSet is not empty.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet if it's still used in any RegexMatchSet or if the RegexPatternSet is not empty.

" }, "DeleteRule":{ "name":"DeleteRule", @@ -391,7 +408,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a Rule from a WebACL, use UpdateWebACL.

To permanently delete a Rule from AWS WAF, perform the following steps:

  1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.

  3. Submit a DeleteRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a Rule from a WebACL, use UpdateWebACL.

To permanently delete a Rule from AWS WAF, perform the following steps:

  1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.

  3. Submit a DeleteRule request.

" }, "DeleteRuleGroup":{ "name":"DeleteRuleGroup", @@ -411,7 +428,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" }, "DeleteSizeConstraintSet":{ "name":"DeleteSizeConstraintSet", @@ -429,7 +446,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

To permanently delete a SizeConstraintSet, perform the following steps:

  1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.

  3. Submit a DeleteSizeConstraintSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

To permanently delete a SizeConstraintSet, perform the following steps:

  1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.

  3. Submit a DeleteSizeConstraintSet request.

" }, "DeleteSqlInjectionMatchSet":{ "name":"DeleteSqlInjectionMatchSet", @@ -447,7 +464,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

  1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.

  3. Submit a DeleteSqlInjectionMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

  1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.

  3. Submit a DeleteSqlInjectionMatchSet request.

" }, "DeleteWebACL":{ "name":"DeleteWebACL", @@ -467,7 +484,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

To delete a WebACL, perform the following steps:

  1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.

  3. Submit a DeleteWebACL request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

To delete a WebACL, perform the following steps:

  1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.

  3. Submit a DeleteWebACL request.

" }, "DeleteXssMatchSet":{ "name":"DeleteXssMatchSet", @@ -485,7 +502,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's still used in any Rules or if it still contains any XssMatchTuple objects.

If you just want to remove an XssMatchSet from a Rule, use UpdateRule.

To permanently delete an XssMatchSet from AWS WAF, perform the following steps:

  1. Update the XssMatchSet to remove filters, if any. For more information, see UpdateXssMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteXssMatchSet request.

  3. Submit a DeleteXssMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's still used in any Rules or if it still contains any XssMatchTuple objects.

If you just want to remove an XssMatchSet from a Rule, use UpdateRule.

To permanently delete an XssMatchSet from AWS WAF, perform the following steps:

  1. Update the XssMatchSet to remove filters, if any. For more information, see UpdateXssMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteXssMatchSet request.

  3. Submit a DeleteXssMatchSet request.

" }, "GetByteMatchSet":{ "name":"GetByteMatchSet", @@ -500,7 +517,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the ByteMatchSet specified by ByteMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the ByteMatchSet specified by ByteMatchSetId.

" }, "GetChangeToken":{ "name":"GetChangeToken", @@ -513,7 +530,7 @@ "errors":[ {"shape":"WAFInternalErrorException"} ], - "documentation":"

When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

" }, "GetChangeTokenStatus":{ "name":"GetChangeTokenStatus", @@ -527,7 +544,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

  • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.

  • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.

  • INSYNC: Propagation is complete.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

  • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.

  • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.

  • INSYNC: Propagation is complete.

" }, "GetGeoMatchSet":{ "name":"GetGeoMatchSet", @@ -542,7 +559,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the GeoMatchSet that is specified by GeoMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the GeoMatchSet that is specified by GeoMatchSetId.

" }, "GetIPSet":{ "name":"GetIPSet", @@ -557,7 +574,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the IPSet that is specified by IPSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the IPSet that is specified by IPSetId.

" }, "GetLoggingConfiguration":{ "name":"GetLoggingConfiguration", @@ -571,7 +588,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the LoggingConfiguration for the specified web ACL.

" }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", @@ -585,7 +602,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the IAM policy attached to the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the IAM policy attached to the RuleGroup.

" }, "GetRateBasedRule":{ "name":"GetRateBasedRule", @@ -600,7 +617,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RateBasedRule that is specified by the RuleId that you included in the GetRateBasedRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RateBasedRule that is specified by the RuleId that you included in the GetRateBasedRule request.

" }, "GetRateBasedRuleManagedKeys":{ "name":"GetRateBasedRuleManagedKeys", @@ -616,7 +633,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of IP addresses currently being blocked by the RateBasedRule that is specified by the RuleId. The maximum number of managed keys that will be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the 10,000 addresses with the highest rates will be blocked.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of IP addresses currently being blocked by the RateBasedRule that is specified by the RuleId. The maximum number of managed keys that will be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the 10,000 addresses with the highest rates will be blocked.

" }, "GetRegexMatchSet":{ "name":"GetRegexMatchSet", @@ -631,7 +648,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RegexMatchSet specified by RegexMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RegexMatchSet specified by RegexMatchSetId.

" }, "GetRegexPatternSet":{ "name":"GetRegexPatternSet", @@ -646,7 +663,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RegexPatternSet specified by RegexPatternSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RegexPatternSet specified by RegexPatternSetId.

" }, "GetRule":{ "name":"GetRule", @@ -661,7 +678,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the Rule that is specified by the RuleId that you included in the GetRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the Rule that is specified by the RuleId that you included in the GetRule request.

" }, "GetRuleGroup":{ "name":"GetRuleGroup", @@ -675,7 +692,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RuleGroup that is specified by the RuleGroupId that you included in the GetRuleGroup request.

To view the rules in a rule group, use ListActivatedRulesInRuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RuleGroup that is specified by the RuleGroupId that you included in the GetRuleGroup request.

To view the rules in a rule group, use ListActivatedRulesInRuleGroup.

" }, "GetSampledRequests":{ "name":"GetSampledRequests", @@ -689,7 +706,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" }, "GetSizeConstraintSet":{ "name":"GetSizeConstraintSet", @@ -704,7 +721,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the SizeConstraintSet specified by SizeConstraintSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the SizeConstraintSet specified by SizeConstraintSetId.

" }, "GetSqlInjectionMatchSet":{ "name":"GetSqlInjectionMatchSet", @@ -719,7 +736,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

" }, "GetWebACL":{ "name":"GetWebACL", @@ -734,7 +751,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the WebACL that is specified by WebACLId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the WebACL that is specified by WebACLId.

" }, "GetXssMatchSet":{ "name":"GetXssMatchSet", @@ -749,7 +766,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the XssMatchSet that is specified by XssMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the XssMatchSet that is specified by XssMatchSetId.

" }, "ListActivatedRulesInRuleGroup":{ "name":"ListActivatedRulesInRuleGroup", @@ -764,7 +781,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of ActivatedRule objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of ActivatedRule objects.

" }, "ListByteMatchSets":{ "name":"ListByteMatchSets", @@ -778,7 +795,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of ByteMatchSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of ByteMatchSetSummary objects.

" }, "ListGeoMatchSets":{ "name":"ListGeoMatchSets", @@ -792,7 +809,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of GeoMatchSetSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of GeoMatchSetSummary objects in the response.

" }, "ListIPSets":{ "name":"ListIPSets", @@ -806,7 +823,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of IPSetSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of IPSetSummary objects in the response.

" }, "ListLoggingConfigurations":{ "name":"ListLoggingConfigurations", @@ -821,7 +838,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of LoggingConfiguration objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of LoggingConfiguration objects.

" }, "ListRateBasedRules":{ "name":"ListRateBasedRules", @@ -835,7 +852,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RuleSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleSummary objects.

" }, "ListRegexMatchSets":{ "name":"ListRegexMatchSets", @@ -849,7 +866,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RegexMatchSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RegexMatchSetSummary objects.

" }, "ListRegexPatternSets":{ "name":"ListRegexPatternSets", @@ -863,7 +880,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RegexPatternSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RegexPatternSetSummary objects.

" }, "ListRuleGroups":{ "name":"ListRuleGroups", @@ -876,7 +893,7 @@ "errors":[ {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns an array of RuleGroup objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleGroup objects.

" }, "ListRules":{ "name":"ListRules", @@ -890,7 +907,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RuleSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleSummary objects.

" }, "ListSizeConstraintSets":{ "name":"ListSizeConstraintSets", @@ -904,7 +921,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of SizeConstraintSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of SizeConstraintSetSummary objects.

" }, "ListSqlInjectionMatchSets":{ "name":"ListSqlInjectionMatchSets", @@ -918,7 +935,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of SqlInjectionMatchSet objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of SqlInjectionMatchSet objects.

" }, "ListSubscribedRuleGroups":{ "name":"ListSubscribedRuleGroups", @@ -932,7 +949,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns an array of RuleGroup objects that you are subscribed to.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleGroup objects that you are subscribed to.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -949,7 +966,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Retrieves the tags associated with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "ListWebACLs":{ "name":"ListWebACLs", @@ -963,7 +981,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of WebACLSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of WebACLSummary objects in the response.

" }, "ListXssMatchSets":{ "name":"ListXssMatchSets", @@ -977,7 +995,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of XssMatchSet objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of XssMatchSet objects.

" }, "PutLoggingConfiguration":{ "name":"PutLoggingConfiguration", @@ -993,7 +1011,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFServiceLinkedRoleErrorException"} ], - "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -1009,7 +1027,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidPermissionPolicyException"} ], - "documentation":"

Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Attaches an IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" }, "TagResource":{ "name":"TagResource", @@ -1027,7 +1045,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can use this action to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "UntagResource":{ "name":"UntagResource", @@ -1044,7 +1063,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

" }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", @@ -1064,7 +1084,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateGeoMatchSet":{ "name":"UpdateGeoMatchSet", @@ -1085,7 +1105,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For each GeoMatchConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an GeoMatchConstraint object, you delete the existing object and add a new one.

  • The Type. The only valid value for Type is Country.

  • The Value, which is a two character code for the country to add to the GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value.

To create and configure an GeoMatchSet, perform the following steps:

  1. Submit a CreateGeoMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  3. Submit an UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch for.

When you update an GeoMatchSet, you specify the country that you want to add and/or the country that you want to delete. If you want to change a country, you delete the existing country and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes GeoMatchConstraint objects in a GeoMatchSet. For each GeoMatchConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a GeoMatchConstraint object, you delete the existing object and add a new one.

  • The Type. The only valid value for Type is Country.

  • The Value, which is a two character code for the country to add to the GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value.

To create and configure a GeoMatchSet, perform the following steps:

  1. Submit a CreateGeoMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  3. Submit an UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch for.

When you update a GeoMatchSet, you specify the country that you want to add and/or the country that you want to delete. If you want to change a country, you delete the existing country and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateIPSet":{ "name":"UpdateIPSet", @@ -1106,7 +1126,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRateBasedRule":{ "name":"UpdateRateBasedRule", @@ -1127,7 +1147,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes Predicate objects in a rule and updates the RateLimit in the rule.

Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to block or count. The RateLimit specifies the number of requests every five minutes that triggers the rule.

If you add more than one predicate to a RateBasedRule, a request must match all the predicates and exceed the RateLimit to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 15,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that satisfy the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions much be received at a rate of more than 15,000 every five minutes. If the rate drops below this limit, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 15,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes Predicate objects in a rule and updates the RateLimit in the rule.

Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to block or count. The RateLimit specifies the number of requests every five minutes that triggers the rule.

If you add more than one predicate to a RateBasedRule, a request must match all the predicates and exceed the RateLimit to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 1,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that satisfy the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 1,000 every five minutes. If the rate drops below this limit, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 1,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

" }, "UpdateRegexMatchSet":{ "name":"UpdateRegexMatchSet", @@ -1147,7 +1167,7 @@ {"shape":"WAFInvalidOperationException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a RegexMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspectupdate, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can create a RegexPatternSet that matches any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Create a RegexMatchSet. For more information, see CreateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  3. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the RegexPatternSet that contain the regular expression patters you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a RegexMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can create a RegexPatternSet that matches any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Create a RegexMatchSet. For more information, see CreateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  3. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the RegexPatternSet that contains the regular expression patterns you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRegexPatternSet":{ "name":"UpdateRegexPatternSet", @@ -1167,7 +1187,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFInvalidRegexPatternException"} ], - "documentation":"

Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each RegexPatternString object, you specify the following values:

  • Whether to insert or delete the RegexPatternString.

  • The regular expression pattern that you want to insert or delete. For more information, see RegexPatternSet.

For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS WAF will match this RegexPatternString to:

  • BadBot

  • BadB0t

  • B@dBot

  • B@dB0t

To create and configure a RegexPatternSet, perform the following steps:

  1. Create a RegexPatternSet. For more information, see CreateRegexPatternSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  3. Submit an UpdateRegexPatternSet request to specify the regular expression pattern that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each RegexPatternString object, you specify the following values:

  • Whether to insert or delete the RegexPatternString.

  • The regular expression pattern that you want to insert or delete. For more information, see RegexPatternSet.

For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS WAF will match this RegexPatternString to:

  • BadBot

  • BadB0t

  • B@dBot

  • B@dB0t

To create and configure a RegexPatternSet, perform the following steps:

  1. Create a RegexPatternSet. For more information, see CreateRegexPatternSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  3. Submit an UpdateRegexPatternSet request to specify the regular expression pattern that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRule":{ "name":"UpdateRule", @@ -1188,7 +1208,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a Rule:

  • A ByteMatchSet that matches the value BadBot in the User-Agent header

  • An IPSet that matches the IP address 192.0.2.44

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule.

  2. Create the Rule. See CreateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  4. Submit an UpdateRule request to add predicates to the Rule.

  5. Create and update a WebACL that contains the Rule. See CreateWebACL.

If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a Rule:

  • A ByteMatchSet that matches the value BadBot in the User-Agent header

  • An IPSet that matches the IP address 192.0.2.44

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule.

  2. Create the Rule. See CreateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  4. Submit an UpdateRule request to add predicates to the Rule.

  5. Create and update a WebACL that contains the Rule. See CreateWebACL.

If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRuleGroup":{ "name":"UpdateRuleGroup", @@ -1207,7 +1227,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Inserts or deletes ActivatedRule objects in a RuleGroup.

You can only insert REGULAR rules into a rule group.

You can have a maximum of ten rules per rule group.

To create and configure a RuleGroup, perform the following steps:

  1. Create and update the Rules that you want to include in the RuleGroup. See CreateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRuleGroup request.

  3. Submit an UpdateRuleGroup request to add Rules to the RuleGroup.

  4. Create and update a WebACL that contains the RuleGroup. See CreateWebACL.

If you want to replace one Rule with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ActivatedRule objects in a RuleGroup.

You can only insert REGULAR rules into a rule group.

You can have a maximum of ten rules per rule group.

To create and configure a RuleGroup, perform the following steps:

  1. Create and update the Rules that you want to include in the RuleGroup. See CreateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRuleGroup request.

  3. Submit an UpdateRuleGroup request to add Rules to the RuleGroup.

  4. Create and update a WebACL that contains the RuleGroup. See CreateWebACL.

If you want to replace one Rule with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSizeConstraintSet":{ "name":"UpdateSizeConstraintSet", @@ -1228,7 +1248,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in the selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSqlInjectionMatchSet":{ "name":"UpdateSqlInjectionMatchSet", @@ -1248,7 +1268,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateWebACL":{ "name":"UpdateWebACL", @@ -1270,7 +1290,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFSubscriptionNotFoundException"} ], - "documentation":"

Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

  • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.

  • The Rules that you want to add or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.

  • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.

  • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.

To create and configure a WebACL, perform the following steps:

  1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Create a WebACL. See CreateWebACL.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    The ActivatedRule can be a rule group. If you specify a rule group as your ActivatedRule, you can exclude specific rules from that rule group.

    If you already have a rule group associated with a web ACL and want to submit an UpdateWebACL request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, the re-insert it again, specifying the excluded rules. For details, see ActivatedRule$ExcludedRules.

Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the UpdateWebACL request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

  • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.

  • The Rules that you want to add or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.

  • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.

  • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.

To create and configure a WebACL, perform the following steps:

  1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Create a WebACL. See CreateWebACL.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    The ActivatedRule can be a rule group. If you specify a rule group as your ActivatedRule, you can exclude specific rules from that rule group.

    If you already have a rule group associated with a web ACL and want to submit an UpdateWebACL request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, then re-insert it, specifying the excluded rules. For details, see ActivatedRule$ExcludedRules.

Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the UpdateWebACL request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateXssMatchSet":{ "name":"UpdateXssMatchSet", @@ -1290,7 +1310,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change an XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change an XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests that you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" } }, "shapes":{ @@ -1327,7 +1347,7 @@ "documentation":"

An array of rules to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup.

Sometimes it is necessary to troubleshoot rule groups that are blocking traffic unexpectedly (false positives). One troubleshooting technique is to identify the specific rule within the rule group that is blocking the legitimate traffic and then disable (exclude) that particular rule. You can exclude rules from both your own rule groups and AWS Marketplace rule groups that have been associated with a web ACL.

Specifying ExcludedRules does not remove those rules from the rule group. Rather, it changes the action for the rules to COUNT. Therefore, requests that match an ExcludedRule are counted but not blocked. The RuleGroup owner will receive COUNT metrics for each ExcludedRule.

If you want to exclude rules from a rule group that is already associated with a web ACL, perform the following steps:

  1. Use the AWS WAF logs to identify the IDs of the rules that you want to exclude. For more information about the logs, see Logging Web ACL Traffic Information.

  2. Submit an UpdateWebACL request that has two actions:

    • The first action deletes the existing rule group from the web ACL. That is, in the UpdateWebACL request, the first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be the rule group that contains the rules that you want to exclude.

    • The second action inserts the same rule group back in, but specifying the rules to exclude. That is, the second Updates:Action should be INSERT, Updates:ActivatedRule:RuleId should be the rule group that you just removed, and ExcludedRules should contain the rules that you want to exclude.

" } }, - "documentation":"

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

" }, "ActivatedRules":{ "type":"list", @@ -1353,7 +1373,7 @@ "documentation":"

Specifies the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" } }, - "documentation":"

In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" }, "ByteMatchSetSummaries":{ "type":"list", @@ -1375,7 +1395,7 @@ "documentation":"

A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

" } }, - "documentation":"

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

" }, "ByteMatchSetUpdate":{ "type":"structure", @@ -1393,7 +1413,7 @@ "documentation":"

Information about the part of a web request that you want AWS WAF to inspect and the value that you want AWS WAF to search for. If you specify DELETE for the value of Action, the ByteMatchTuple values must exactly match the values in the ByteMatchTuple that you want to delete from the ByteMatchSet.

" } }, - "documentation":"

In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

" }, "ByteMatchSetUpdates":{ "type":"list", @@ -1420,14 +1440,14 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "PositionalConstraint":{ "shape":"PositionalConstraint", "documentation":"

Within the portion of a web request that you want to search (for example, in the query string, if any), specify where you want AWS WAF to search. Valid values include the following:

CONTAINS

The specified part of the web request must include the value of TargetString, but the location doesn't matter.

CONTAINS_WORD

The specified part of the web request must include the value of TargetString, and TargetString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means one of the following:

  • TargetString exactly matches the value of the specified part of the web request, such as the value of a header.

  • TargetString is at the beginning of the specified part of the web request and is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot;.

  • TargetString is at the end of the specified part of the web request and is preceded by a character other than an alphanumeric character or underscore (_), for example, ;BadBot.

  • TargetString is in the middle of the specified part of the web request and is preceded and followed by characters other than alphanumeric characters or underscore (_), for example, -BadBot;.

EXACTLY

The value of the specified part of the web request must exactly match the value of TargetString.

STARTS_WITH

The value of TargetString must appear at the beginning of the specified part of the web request.

ENDS_WITH

The value of TargetString must appear at the end of the specified part of the web request.

" } }, - "documentation":"

The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" }, "ByteMatchTuples":{ "type":"list", @@ -1442,7 +1462,9 @@ }, "ChangeToken":{ "type":"string", - "min":1 + "max":128, + "min":1, + "pattern":".*\\S.*" }, "ChangeTokenStatus":{ "type":"string", @@ -1584,7 +1606,10 @@ "shape":"ChangeToken", "documentation":"

The ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRateBasedRuleResponse":{ @@ -1680,7 +1705,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRuleGroupResponse":{ @@ -1716,7 +1744,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRuleResponse":{ @@ -1794,6 +1825,38 @@ }, "documentation":"

The response to a CreateSqlInjectionMatchSet request.

" }, + "CreateWebACLMigrationStackRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "S3BucketName", + "IgnoreUnsupportedType" + ], + "members":{ + "WebACLId":{ + "shape":"ResourceId", + "documentation":"

The UUID of the WAF Classic web ACL that you want to migrate to WAF v2.

" + }, + "S3BucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 bucket must be configured as follows for the migration:

  • The bucket name must start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl.

  • The bucket must be in the Region where you are deploying the template. For example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and you must deploy the template stack to us-west-2.

  • The bucket policies must permit the migration process to write data. For listings of the bucket policies, see the Examples section.

" + }, + "IgnoreUnsupportedType":{ + "shape":"IgnoreUnsupportedType", + "documentation":"

Indicates whether to exclude entities that can't be migrated or to stop the migration. Set this to true to ignore unsupported entities in the web ACL during the migration. Otherwise, if AWS WAF encounters unsupported entities, it stops the process and throws an exception.

" + } + } + }, + "CreateWebACLMigrationStackResponse":{ + "type":"structure", + "required":["S3ObjectUrl"], + "members":{ + "S3ObjectUrl":{ + "shape":"S3ObjectUrl", + "documentation":"

The URL of the template created in Amazon S3.

" + } + } + }, "CreateWebACLRequest":{ "type":"structure", "required":[ @@ -1819,7 +1882,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateWebACLResponse":{ @@ -2213,6 +2279,7 @@ }, "documentation":"

The response to a request to delete an XssMatchSet from AWS WAF.

" }, + "ErrorReason":{"type":"string"}, "ExcludedRule":{ "type":"structure", "required":["RuleId"], @@ -2222,7 +2289,7 @@ "documentation":"

The unique identifier for the rule to exclude from the rule group.

" } }, - "documentation":"

The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup. The rule must belong to the RuleGroup that is specified by the ActivatedRule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup. The rule must belong to the RuleGroup that is specified by the ActivatedRule.

" }, "ExcludedRules":{ "type":"list", @@ -2241,7 +2308,7 @@ "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. The name of the header is not case sensitive.

When the value of Type is SINGLE_QUERY_ARG, enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion. The parameter name is not case sensitive.

If the value of Type is any other value, omit Data.

" } }, - "documentation":"

Specifies where in a web request to look for TargetString.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies where in a web request to look for TargetString.

" }, "GeoMatchConstraint":{ "type":"structure", @@ -2259,7 +2326,7 @@ "documentation":"

The country that you want AWS WAF to search for.

" } }, - "documentation":"

The country from which web requests originate that you want AWS WAF to search for.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The country from which web requests originate that you want AWS WAF to search for.

" }, "GeoMatchConstraintType":{ "type":"string", @@ -2543,7 +2610,7 @@ "documentation":"

An array of GeoMatchConstraint objects, which contain the country that you want AWS WAF to search for.

" } }, - "documentation":"

Contains one or more countries that AWS WAF will search for.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains one or more countries that AWS WAF will search for.

" }, "GeoMatchSetSummaries":{ "type":"list", @@ -2565,7 +2632,7 @@ "documentation":"

A friendly name or description of the GeoMatchSet. You can't change the name of a GeoMatchSet after you create it.

" } }, - "documentation":"

Contains the identifier and the name of the GeoMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name of the GeoMatchSet.

" }, "GeoMatchSetUpdate":{ "type":"structure", @@ -2583,7 +2650,7 @@ "documentation":"

The country from which web requests originate that you want AWS WAF to search for.

" } }, - "documentation":"

Specifies the type of update to perform to an GeoMatchSet with UpdateGeoMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the type of update to perform to a GeoMatchSet with UpdateGeoMatchSet.

" }, "GeoMatchSetUpdates":{ "type":"list", @@ -2864,7 +2931,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "MaxItems":{ "shape":"GetSampledRequestsMaxItems", @@ -2885,7 +2952,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.

" + "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests. Times are in Coordinated Universal Time (UTC) format.

" } } }, @@ -2981,7 +3048,7 @@ "documentation":"

The value of one of the headers in the sampled web request.

" } }, - "documentation":"

The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

" }, "HTTPHeaders":{ "type":"list", @@ -3016,7 +3083,7 @@ "documentation":"

A complex type that contains two values for each header in the sampled web request: the name of the header and the value of the header.

" } }, - "documentation":"

The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

" }, "HTTPVersion":{"type":"string"}, "HeaderName":{"type":"string"}, @@ -3041,7 +3108,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "IPSetDescriptor":{ "type":"structure", @@ -3059,7 +3126,7 @@ "documentation":"

Specify an IPv4 address by using CIDR notation. For example:

  • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.

  • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

Specify an IPv6 address by using CIDR notation. For example:

  • To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.

  • To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64.

" } }, - "documentation":"

Specifies the IP address type (IPV4 or IPV6) and the IP address range (in CIDR format) that web requests originate from.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the IP address type (IPV4 or IPV6) and the IP address range (in CIDR format) that web requests originate from.

" }, "IPSetDescriptorType":{ "type":"string", @@ -3068,7 +3135,12 @@ "IPV6" ] }, - "IPSetDescriptorValue":{"type":"string"}, + "IPSetDescriptorValue":{ + "type":"string", + "max":50, + "min":1, + "pattern":".*\\S.*" + }, "IPSetDescriptors":{ "type":"list", "member":{"shape":"IPSetDescriptor"} @@ -3093,7 +3165,7 @@ "documentation":"

A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

" } }, - "documentation":"

Contains the identifier and the name of the IPSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name of the IPSet.

" }, "IPSetUpdate":{ "type":"structure", @@ -3111,7 +3183,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from.

" } }, - "documentation":"

Specifies the type of update to perform to an IPSet with UpdateIPSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the type of update to perform to an IPSet with UpdateIPSet.

" }, "IPSetUpdates":{ "type":"list", @@ -3119,6 +3191,7 @@ "min":1 }, "IPString":{"type":"string"}, + "IgnoreUnsupportedType":{"type":"boolean"}, "ListActivatedRulesInRuleGroupRequest":{ "type":"structure", "members":{ @@ -3467,16 +3540,31 @@ "type":"structure", "required":["ResourceARN"], "members":{ - "NextMarker":{"shape":"NextMarker"}, - "Limit":{"shape":"PaginationLimit"}, - "ResourceARN":{"shape":"ResourceArn"} + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

" + }, + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + } } }, "ListTagsForResourceResponse":{ "type":"structure", "members":{ - "NextMarker":{"shape":"NextMarker"}, - "TagInfoForResource":{"shape":"TagInfoForResource"} + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

" + }, + "TagInfoForResource":{ + "shape":"TagInfoForResource", + "documentation":"

" + } } }, "ListWebACLsRequest":{ @@ -3559,7 +3647,7 @@ "documentation":"

The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx.

" } }, - "documentation":"

The Amazon Kinesis Data Firehose, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Amazon Kinesis Data Firehose, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" }, "LoggingConfigurations":{ "type":"list", @@ -3570,7 +3658,12 @@ "type":"list", "member":{"shape":"ManagedKey"} }, - "MatchFieldData":{"type":"string"}, + "MatchFieldData":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, "MatchFieldType":{ "type":"string", "enum":[ @@ -3583,11 +3676,30 @@ "ALL_QUERY_ARGS" ] }, - "MetricName":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "MigrationErrorType":{ + "type":"string", + "enum":[ + "ENTITY_NOT_SUPPORTED", + "ENTITY_NOT_FOUND", + "S3_BUCKET_NO_PERMISSION", + "S3_BUCKET_NOT_ACCESSIBLE", + "S3_BUCKET_NOT_FOUND", + "S3_BUCKET_INVALID_REGION", + "S3_INTERNAL_ERROR" + ] + }, "Negated":{"type":"boolean"}, "NextMarker":{ "type":"string", - "min":1 + "max":1224, + "min":1, + "pattern":".*\\S.*" }, "PaginationLimit":{ "type":"integer", @@ -3632,7 +3744,9 @@ }, "PolicyString":{ "type":"string", - "min":1 + "max":395000, + "min":1, + "pattern":".*\\S.*" }, "PopulationSize":{"type":"long"}, "PositionalConstraint":{ @@ -3666,7 +3780,7 @@ "documentation":"

A unique identifier for a predicate in a Rule, such as ByteMatchSetId or IPSetId. The ID is returned by the corresponding Create or List command.

" } }, - "documentation":"

Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

" }, "PredicateType":{ "type":"string", @@ -3759,7 +3873,7 @@ "documentation":"

The maximum number of requests, which have an identical value in the field specified by the RateKey, allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.

" } }, - "documentation":"

A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:

  • The requests come from 192.0.2.44.

  • They contain the value BadBot in the User-Agent header.

In the rule, you also define the rate limit as 15,000.

Requests that meet both of these conditions and exceed 15,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:

  • The requests come from 192.0.2.44.

  • They contain the value BadBot in the User-Agent header.

In the rule, you also define the rate limit as 1,000.

Requests that meet both of these conditions and exceed 1,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.

" }, "RateKey":{ "type":"string", @@ -3790,7 +3904,7 @@ "documentation":"

Contains an array of RegexMatchTuple objects. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" } }, - "documentation":"

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specifies the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one RegexMatchTuple to be considered a match.

" }, "RegexMatchSetSummaries":{ "type":"list", @@ -3812,7 +3926,7 @@ "documentation":"

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

" } }, - "documentation":"

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

" }, "RegexMatchSetUpdate":{ "type":"structure", @@ -3830,7 +3944,7 @@ "documentation":"

Information about the part of a web request that you want AWS WAF to inspect and the identifier of the regular expression (regex) pattern that you want AWS WAF to search for. If you specify DELETE for the value of Action, the RegexMatchTuple values must exactly match the values in the RegexMatchTuple that you want to delete from the RegexMatchSet.

" } }, - "documentation":"

In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple.

" }, "RegexMatchSetUpdates":{ "type":"list", @@ -3858,7 +3972,7 @@ "documentation":"

The RegexPatternSetId for a RegexPatternSet. You use RegexPatternSetId to get information about a RegexPatternSet (see GetRegexPatternSet), update a RegexPatternSet (see UpdateRegexPatternSet), insert a RegexPatternSet into a RegexMatchSet or delete one from a RegexMatchSet (see UpdateRegexMatchSet), and delete a RegexPatternSet from AWS WAF (see DeleteRegexPatternSet).

RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets.

" } }, - "documentation":"

The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" }, "RegexMatchTuples":{ "type":"list", @@ -3884,7 +3998,7 @@ "documentation":"

Specifies the regular expression (regex) patterns that you want AWS WAF to search for, such as B[a@]dB[o0]t.

" } }, - "documentation":"

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

" }, "RegexPatternSetSummaries":{ "type":"list", @@ -3906,7 +4020,7 @@ "documentation":"

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

" } }, - "documentation":"

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

" }, "RegexPatternSetUpdate":{ "type":"structure", @@ -3924,7 +4038,7 @@ "documentation":"

Specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t.

" } }, - "documentation":"

In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether to insert or delete a RegexPatternString and includes the settings for the RegexPatternString.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether to insert or delete a RegexPatternString and includes the settings for the RegexPatternString.

" }, "RegexPatternSetUpdates":{ "type":"list", @@ -3933,7 +4047,9 @@ }, "RegexPatternString":{ "type":"string", - "min":1 + "max":512, + "min":1, + "pattern":".*" }, "RegexPatternStrings":{ "type":"list", @@ -3943,17 +4059,20 @@ "ResourceArn":{ "type":"string", "max":1224, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "ResourceId":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "ResourceName":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "Rule":{ "type":"structure", @@ -3979,7 +4098,7 @@ "documentation":"

The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.

" } }, - "documentation":"

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

" }, "RuleGroup":{ "type":"structure", @@ -3998,7 +4117,7 @@ "documentation":"

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including \"All\" and \"Default_Action.\" You can't change the name of the metric after you create the RuleGroup.

" } }, - "documentation":"

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

" }, "RuleGroupSummaries":{ "type":"list", @@ -4020,7 +4139,7 @@ "documentation":"

A friendly name or description of the RuleGroup. You can't change the name of a RuleGroup after you create it.

" } }, - "documentation":"

Contains the identifier and the friendly name or description of the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the friendly name or description of the RuleGroup.

" }, "RuleGroupUpdate":{ "type":"structure", @@ -4038,7 +4157,7 @@ "documentation":"

The ActivatedRule object specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

" } }, - "documentation":"

Specifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup.

" }, "RuleGroupUpdates":{ "type":"list", @@ -4066,7 +4185,7 @@ "documentation":"

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

" } }, - "documentation":"

Contains the identifier and the friendly name or description of the Rule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the friendly name or description of the Rule.

" }, "RuleUpdate":{ "type":"structure", @@ -4084,12 +4203,22 @@ "documentation":"

The ID of the Predicate (such as an IPSet) that you want to add to a Rule.

" } }, - "documentation":"

Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

" }, "RuleUpdates":{ "type":"list", "member":{"shape":"RuleUpdate"} }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^aws-waf-migration-[0-9A-Za-z\\.\\-_]*" + }, + "S3ObjectUrl":{ + "type":"string", + "min":1 + }, "SampleWeight":{ "type":"long", "min":0 @@ -4122,7 +4251,7 @@ "documentation":"

This value is returned if the GetSampledRequests request specifies the ID of a RuleGroup rather than the ID of an individual rule. RuleWithinRuleGroup is the rule within the specified RuleGroup that matched the request listed in the response.

" } }, - "documentation":"

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

" }, "SampledHTTPRequests":{ "type":"list", @@ -4148,7 +4277,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" }, "ComparisonOperator":{ "shape":"ComparisonOperator", @@ -4159,7 +4288,7 @@ "documentation":"

The size in bytes that you want AWS WAF to compare against the size of the specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

Valid values for size are 0 - 21474836480 bytes (0 - 20 GB).

If you specify URI for the value of Type, the / in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

" } }, - "documentation":"

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" }, "SizeConstraintSet":{ "type":"structure", @@ -4181,7 +4310,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect the size of.

" } }, - "documentation":"

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

" }, "SizeConstraintSetSummaries":{ "type":"list", @@ -4203,7 +4332,7 @@ "documentation":"

The name of the SizeConstraintSet, if any.

" } }, - "documentation":"

The Id and Name of a SizeConstraintSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of a SizeConstraintSet.

" }, "SizeConstraintSetUpdate":{ "type":"structure", @@ -4221,7 +4350,7 @@ "documentation":"

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

" }, "SizeConstraintSetUpdates":{ "type":"list", @@ -4252,7 +4381,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect for snippets of malicious SQL code.

" } }, - "documentation":"

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

" }, "SqlInjectionMatchSetSummaries":{ "type":"list", @@ -4274,7 +4403,7 @@ "documentation":"

The name of the SqlInjectionMatchSet, if any, specified by Id.

" } }, - "documentation":"

The Id and Name of a SqlInjectionMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of a SqlInjectionMatchSet.

" }, "SqlInjectionMatchSetUpdate":{ "type":"structure", @@ -4292,7 +4421,7 @@ "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

" }, "SqlInjectionMatchSetUpdates":{ "type":"list", @@ -4312,10 +4441,10 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, - "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" }, "SqlInjectionMatchTuples":{ "type":"list", @@ -4346,26 +4475,45 @@ "documentation":"

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including \"All\" and \"Default_Action.\" You can't change the name of the metric after you create the RuleGroup.

" } }, - "documentation":"

A summary of the rule groups you are subscribed to.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A summary of the rule groups you are subscribed to.

" }, "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ - "Key":{"shape":"TagKey"}, - "Value":{"shape":"TagValue"} - } + "Key":{ + "shape":"TagKey", + "documentation":"

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

" + } + }, + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "TagInfoForResource":{ "type":"structure", "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "TagList":{"shape":"TagList"} - } + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

" + } + }, + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Information for a tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "TagKeyList":{ "type":"list", @@ -4384,8 +4532,14 @@ "Tags" ], "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "Tags":{"shape":"TagList"} + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "TagResourceResponse":{ @@ -4396,7 +4550,8 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":".*" }, "TextTransformation":{ "type":"string", @@ -4418,14 +4573,14 @@ "members":{ "StartTime":{ "shape":"Timestamp", - "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the date and time in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the date and time in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" } }, - "documentation":"

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" }, "Timestamp":{"type":"timestamp"}, "URIString":{"type":"string"}, @@ -4436,8 +4591,14 @@ "TagKeys" ], "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "TagKeys":{"shape":"TagKeyList"} + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

" + } } }, "UntagResourceResponse":{ @@ -4834,6 +4995,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true }, "WAFDisallowedNameException":{ @@ -4844,6 +5006,16 @@ "documentation":"

The name specified is invalid.

", "exception":true }, + "WAFEntityMigrationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"}, + "MigrationErrorType":{"shape":"MigrationErrorType"}, + "MigrationErrorReason":{"shape":"ErrorReason"} + }, + "documentation":"

The operation failed due to a problem with the migration. The failure cause is provided in the exception, in the MigrationErrorType:

  • ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the IgnoreUnsupportedType is not set to true.

  • ENTITY_NOT_FOUND - The web ACL doesn't exist.

  • S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject action to the specified Amazon S3 bucket.

  • S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to perform the PutObject action in the bucket.

  • S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist.

  • S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as the web ACL.

  • S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 bucket for another reason.

", + "exception":true + }, "WAFInternalErrorException":{ "type":"structure", "members":{ @@ -4963,6 +5135,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true }, "WAFTagOperationInternalErrorException":{ @@ -4970,6 +5143,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true, "fault":true }, @@ -4982,7 +5156,7 @@ "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a Rule. Valid settings include the following:

  • ALLOW: AWS WAF allows requests

  • BLOCK: AWS WAF blocks requests

  • COUNT: AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" } }, - "documentation":"

For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

" }, "WafActionType":{ "type":"string", @@ -5001,7 +5175,7 @@ "documentation":"

COUNT overrides the action specified by the individual rule within a RuleGroup . If set to NONE, the rule's action will take place.

" } }, - "documentation":"

The action to take if any rule within the RuleGroup matches a request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The action to take if any rule within the RuleGroup matches a request.

" }, "WafOverrideActionType":{ "type":"string", @@ -5051,7 +5225,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the web ACL.

" } }, - "documentation":"

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

" }, "WebACLSummaries":{ "type":"list", @@ -5073,7 +5247,7 @@ "documentation":"

A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

" } }, - "documentation":"

Contains the identifier and the name or description of the WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name or description of the WebACL.

" }, "WebACLUpdate":{ "type":"structure", @@ -5091,7 +5265,7 @@ "documentation":"

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

" } }, - "documentation":"

Specifies whether to insert a Rule into or delete a Rule from a WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies whether to insert a Rule into or delete a Rule from a WebACL.

" }, "WebACLUpdates":{ "type":"list", @@ -5117,7 +5291,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect for cross-site scripting attacks.

" } }, - "documentation":"

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If a XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If a XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

" }, "XssMatchSetSummaries":{ "type":"list", @@ -5139,7 +5313,7 @@ "documentation":"

The name of the XssMatchSet, if any, specified by Id.

" } }, - "documentation":"

The Id and Name of an XssMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of an XssMatchSet.

" }, "XssMatchSetUpdate":{ "type":"structure", @@ -5157,7 +5331,7 @@ "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect for cross-site scripting attacks and indicates whether you want to add the specification to an XssMatchSet or delete it from an XssMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect for cross-site scripting attacks and indicates whether you want to add the specification to an XssMatchSet or delete it from an XssMatchSet.

" }, "XssMatchSetUpdates":{ "type":"list", @@ -5177,10 +5351,10 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, - "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" }, "XssMatchTuples":{ "type":"list", @@ -5188,5 +5362,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

This is the AWS WAF API Reference for using AWS WAF with Amazon CloudFront. The AWS WAF actions and data types listed in the reference are available for protecting Amazon CloudFront distributions. You can use these actions and data types via the endpoint waf.amazonaws.com. This guide is for developers who need detailed information about the AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

This is the AWS WAF Classic API Reference for using AWS WAF Classic with Amazon CloudFront. The AWS WAF Classic actions and data types listed in the reference are available for protecting Amazon CloudFront distributions. You can use these actions and data types via the endpoint waf.amazonaws.com. This guide is for developers who need detailed information about the AWS WAF Classic API actions, data types, and errors. For detailed information about AWS WAF Classic features and an overview of how to use the AWS WAF Classic API, see AWS WAF Classic in the developer guide.

" } diff --git a/services/waf/src/main/resources/codegen-resources/wafregional/service-2.json b/services/waf/src/main/resources/codegen-resources/wafregional/service-2.json index 58ae7c2f576b..bbf6036e1a32 100644 --- a/services/waf/src/main/resources/codegen-resources/wafregional/service-2.json +++ b/services/waf/src/main/resources/codegen-resources/wafregional/service-2.json @@ -28,7 +28,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFUnavailableEntityException"} ], - "documentation":"

Associates a web ACL with a resource, either an application load balancer or Amazon API Gateway stage.

" + "documentation":"

This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Associates a web ACL with a resource, either an application load balancer or Amazon API Gateway stage.

" }, "CreateByteMatchSet":{ "name":"CreateByteMatchSet", @@ -46,7 +46,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.

  2. Submit a CreateByteMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a ByteMatchSet that matches any requests with User-Agent headers that contain the string BadBot. You can then configure AWS WAF to reject those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateByteMatchSet request.

  2. Submit a CreateByteMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  4. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateGeoMatchSet":{ "name":"CreateGeoMatchSet", @@ -64,7 +64,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an GeoMatchSet, which you use to specify which web requests you want to allow or block based on the country that the requests originate from. For example, if you're receiving a lot of requests from one or more countries and you want to block the requests, you can create an GeoMatchSet that contains those countries and then configure AWS WAF to block the requests.

To create and configure a GeoMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateGeoMatchSet request.

  2. Submit a CreateGeoMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  4. Submit an UpdateGeoMatchSetSet request to specify the countries that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an GeoMatchSet, which you use to specify which web requests you want to allow or block based on the country that the requests originate from. For example, if you're receiving a lot of requests from one or more countries and you want to block the requests, you can create an GeoMatchSet that contains those countries and then configure AWS WAF to block the requests.

To create and configure a GeoMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateGeoMatchSet request.

  2. Submit a CreateGeoMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  4. Submit an UpdateGeoMatchSet request to specify the countries that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateIPSet":{ "name":"CreateIPSet", @@ -82,7 +82,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an IPSet, which you use to specify which web requests that you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.

  2. Submit a CreateIPSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an IPSet, which you use to specify which web requests that you want to allow or block based on the IP addresses that the requests originate from. For example, if you're receiving a lot of requests from one or more individual IP addresses or one or more ranges of IP addresses and you want to block the requests, you can create an IPSet that contains those IP addresses and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateIPSet request.

  2. Submit a CreateIPSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  4. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRateBasedRule":{ "name":"CreateRateBasedRule", @@ -102,7 +102,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The RateBasedRule also contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to count or block if these requests exceed the RateLimit.

If you add more than one predicate to a RateBasedRule, a request not only must exceed the RateLimit, but it also must match all the specifications to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 15,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 15,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 15,000 for a five-minute period, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 15,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

To create and configure a RateBasedRule, perform the following steps:

  1. Create and update the predicates that you want to include in the rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRateBasedRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRateBasedRule request to specify the predicates that you want to include in the rule.

  6. Create and update a WebACL that contains the RateBasedRule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies the maximum number of requests that AWS WAF allows from a specified IP address in a five-minute period. The RateBasedRule also contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to count or block if these requests exceed the RateLimit.

If you add more than one predicate to a RateBasedRule, a request not only must exceed the RateLimit, but it also must match all the conditions to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 1,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that meet the conditions in the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 1,000 requests every five minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 1,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

To create and configure a RateBasedRule, perform the following steps:

  1. Create and update the predicates that you want to include in the rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRateBasedRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRateBasedRule request to specify the predicates that you want to include in the rule.

  6. Create and update a WebACL that contains the RateBasedRule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRegexMatchSet":{ "name":"CreateRegexMatchSet", @@ -118,7 +118,7 @@ {"shape":"WAFDisallowedNameException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a RegexMatchSet that contains a RegexMatchTuple that looks for any requests with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexMatchSet request.

  2. Submit a CreateRegexMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  4. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value, using a RegexPatternSet, that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the part of a web request that you want AWS WAF to inspect, such as the values of the User-Agent header or the query string. For example, you can create a RegexMatchSet that contains a RegexMatchTuple that looks for any requests with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexMatchSet request.

  2. Submit a CreateRegexMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  4. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value, using a RegexPatternSet, that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRegexPatternSet":{ "name":"CreateRegexPatternSet", @@ -134,7 +134,7 @@ {"shape":"WAFDisallowedNameException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexPatternSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexPatternSet request.

  2. Submit a CreateRegexPatternSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  4. Submit an UpdateRegexPatternSet request to specify the string that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexPatternSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRegexPatternSet request.

  2. Submit a CreateRegexPatternSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  4. Submit an UpdateRegexPatternSet request to specify the string that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRule":{ "name":"CreateRule", @@ -154,7 +154,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose that you add the following to a Rule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.

  6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and other predicates that identify the requests that you want to block. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed or blocked. For example, suppose that you add the following to a Rule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule. For more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateRule request.

  3. Submit a CreateRule request.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  5. Submit an UpdateRule request to specify the predicates that you want to include in the Rule.

  6. Create and update a WebACL that contains the Rule. For more information, see CreateWebACL.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateRuleGroup":{ "name":"CreateRuleGroup", @@ -173,7 +173,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a RuleGroup. A rule group is a collection of predefined rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a RuleGroup. A rule group is a collection of predefined rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateSizeConstraintSet":{ "name":"CreateSizeConstraintSet", @@ -191,7 +191,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.

  2. Submit a CreateSizeConstraintSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify the part of a web request that you want AWS WAF to check for length, such as the length of the User-Agent header or the length of the query string. For example, you can create a SizeConstraintSet that matches any requests that have a query string that is longer than 100 bytes. You can then configure AWS WAF to reject those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSizeConstraintSet request.

  2. Submit a CreateSizeConstraintSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  4. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateSqlInjectionMatchSet":{ "name":"CreateSqlInjectionMatchSet", @@ -209,7 +209,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.

  2. Submit a CreateSqlInjectionMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.

  4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests that contain snippets of SQL code in a specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request.

  2. Submit a CreateSqlInjectionMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSqlInjectionMatchSet request.

  4. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests in which you want to allow, block, or count malicious SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "CreateWebACL":{ "name":"CreateWebACL", @@ -230,7 +230,24 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFBadRequestException"} ], - "documentation":"

Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

To create and configure a WebACL, perform the following steps:

  1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.

  4. Submit a CreateWebACL request.

  5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates a WebACL, which contains the Rules that identify the CloudFront web requests that you want to allow, block, or count. AWS WAF evaluates Rules in order based on the value of Priority for each Rule.

You also specify a default action, either ALLOW or BLOCK. If a web request doesn't match any of the Rules in a WebACL, AWS WAF responds to the request with the default action.

To create and configure a WebACL, perform the following steps:

  1. Create and update the ByteMatchSet objects and other predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateWebACL request.

  4. Submit a CreateWebACL request.

  5. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  6. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

For more information about how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + }, + "CreateWebACLMigrationStack":{ + "name":"CreateWebACLMigrationStack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWebACLMigrationStackRequest"}, + "output":{"shape":"CreateWebACLMigrationStackResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFEntityMigrationException"} + ], + "documentation":"

Creates an AWS CloudFormation WAFV2 template for the specified web ACL in the specified Amazon S3 bucket. Then, in CloudFormation, you create a stack from the template, to create the web ACL and its resources in AWS WAFV2. Use this to migrate your AWS WAF Classic web ACL to the latest version of AWS WAF.

This is part of a larger migration procedure for web ACLs from AWS WAF Classic to the latest version of AWS WAF. For the full procedure, including caveats and manual steps to complete the migration and switch over to the new web ACL, see Migrating your AWS WAF Classic resources to AWS WAF in the AWS WAF Developer Guide.

" }, "CreateXssMatchSet":{ "name":"CreateXssMatchSet", @@ -248,7 +265,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Creates an XssMatchSet, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure an XssMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateXssMatchSet request.

  2. Submit a CreateXssMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.

  4. Submit an UpdateXssMatchSet request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Creates an XssMatchSet, which you use to allow, block, or count requests that contain cross-site scripting attacks in the specified part of web requests. AWS WAF searches for character sequences that are likely to be malicious strings.

To create and configure an XssMatchSet, perform the following steps:

  1. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a CreateXssMatchSet request.

  2. Submit a CreateXssMatchSet request.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateXssMatchSet request.

  4. Submit an UpdateXssMatchSet request to specify the parts of web requests in which you want to allow, block, or count cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "DeleteByteMatchSet":{ "name":"DeleteByteMatchSet", @@ -266,7 +283,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

To permanently delete a ByteMatchSet, perform the following steps:

  1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.

  3. Submit a DeleteByteMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's still used in any Rules or if it still includes any ByteMatchTuple objects (any filters).

If you just want to remove a ByteMatchSet from a Rule, use UpdateRule.

To permanently delete a ByteMatchSet, perform the following steps:

  1. Update the ByteMatchSet to remove filters, if any. For more information, see UpdateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteByteMatchSet request.

  3. Submit a DeleteByteMatchSet request.

" }, "DeleteGeoMatchSet":{ "name":"DeleteGeoMatchSet", @@ -284,7 +301,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's still used in any Rules or if it still includes any countries.

If you just want to remove a GeoMatchSet from a Rule, use UpdateRule.

To permanently delete a GeoMatchSet from AWS WAF, perform the following steps:

  1. Update the GeoMatchSet to remove any countries. For more information, see UpdateGeoMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteGeoMatchSet request.

  3. Submit a DeleteGeoMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's still used in any Rules or if it still includes any countries.

If you just want to remove a GeoMatchSet from a Rule, use UpdateRule.

To permanently delete a GeoMatchSet from AWS WAF, perform the following steps:

  1. Update the GeoMatchSet to remove any countries. For more information, see UpdateGeoMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteGeoMatchSet request.

  3. Submit a DeleteGeoMatchSet request.

" }, "DeleteIPSet":{ "name":"DeleteIPSet", @@ -302,7 +319,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an IPSet. You can't delete an IPSet if it's still used in any Rules or if it still includes any IP addresses.

If you just want to remove an IPSet from a Rule, use UpdateRule.

To permanently delete an IPSet from AWS WAF, perform the following steps:

  1. Update the IPSet to remove IP address ranges, if any. For more information, see UpdateIPSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteIPSet request.

  3. Submit a DeleteIPSet request.

" }, "DeleteLoggingConfiguration":{ "name":"DeleteLoggingConfiguration", @@ -317,7 +334,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFStaleDataException"} ], - "documentation":"

Permanently deletes the LoggingConfiguration from the specified web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes the LoggingConfiguration from the specified web ACL.

" }, "DeletePermissionPolicy":{ "name":"DeletePermissionPolicy", @@ -332,7 +349,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Permanently deletes an IAM policy from the specified RuleGroup.

The user making the request must be the owner of the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an IAM policy from the specified RuleGroup.

The user making the request must be the owner of the RuleGroup.

" }, "DeleteRateBasedRule":{ "name":"DeleteRateBasedRule", @@ -352,7 +369,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a RateBasedRule. You can't delete a rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a rule from a WebACL, use UpdateWebACL.

To permanently delete a RateBasedRule from AWS WAF, perform the following steps:

  1. Update the RateBasedRule to remove predicates, if any. For more information, see UpdateRateBasedRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRateBasedRule request.

  3. Submit a DeleteRateBasedRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RateBasedRule. You can't delete a rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a rule from a WebACL, use UpdateWebACL.

To permanently delete a RateBasedRule from AWS WAF, perform the following steps:

  1. Update the RateBasedRule to remove predicates, if any. For more information, see UpdateRateBasedRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRateBasedRule request.

  3. Submit a DeleteRateBasedRule request.

" }, "DeleteRegexMatchSet":{ "name":"DeleteRegexMatchSet", @@ -370,7 +387,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if it's still used in any Rules or if it still includes any RegexMatchTuples objects (any filters).

If you just want to remove a RegexMatchSet from a Rule, use UpdateRule.

To permanently delete a RegexMatchSet, perform the following steps:

  1. Update the RegexMatchSet to remove filters, if any. For more information, see UpdateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRegexMatchSet request.

  3. Submit a DeleteRegexMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if it's still used in any Rules or if it still includes any RegexMatchTuples objects (any filters).

If you just want to remove a RegexMatchSet from a Rule, use UpdateRule.

To permanently delete a RegexMatchSet, perform the following steps:

  1. Update the RegexMatchSet to remove filters, if any. For more information, see UpdateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRegexMatchSet request.

  3. Submit a DeleteRegexMatchSet request.

" }, "DeleteRegexPatternSet":{ "name":"DeleteRegexPatternSet", @@ -388,7 +405,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet if it's still used in any RegexMatchSet or if the RegexPatternSet is not empty.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet if it's still used in any RegexMatchSet or if the RegexPatternSet is not empty.

" }, "DeleteRule":{ "name":"DeleteRule", @@ -408,7 +425,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a Rule from a WebACL, use UpdateWebACL.

To permanently delete a Rule from AWS WAF, perform the following steps:

  1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.

  3. Submit a DeleteRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a Rule. You can't delete a Rule if it's still used in any WebACL objects or if it still includes any predicates, such as ByteMatchSet objects.

If you just want to remove a Rule from a WebACL, use UpdateWebACL.

To permanently delete a Rule from AWS WAF, perform the following steps:

  1. Update the Rule to remove predicates, if any. For more information, see UpdateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRule request.

  3. Submit a DeleteRule request.

" }, "DeleteRuleGroup":{ "name":"DeleteRuleGroup", @@ -428,7 +445,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still used in any WebACL objects or if it still includes any rules.

If you just want to remove a RuleGroup from a WebACL, use UpdateWebACL.

To permanently delete a RuleGroup from AWS WAF, perform the following steps:

  1. Update the RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteRuleGroup request.

  3. Submit a DeleteRuleGroup request.

" }, "DeleteSizeConstraintSet":{ "name":"DeleteSizeConstraintSet", @@ -446,7 +463,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

To permanently delete a SizeConstraintSet, perform the following steps:

  1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.

  3. Submit a DeleteSizeConstraintSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet if it's still used in any Rules or if it still includes any SizeConstraint objects (any filters).

If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule.

To permanently delete a SizeConstraintSet, perform the following steps:

  1. Update the SizeConstraintSet to remove filters, if any. For more information, see UpdateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request.

  3. Submit a DeleteSizeConstraintSet request.

" }, "DeleteSqlInjectionMatchSet":{ "name":"DeleteSqlInjectionMatchSet", @@ -464,7 +481,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

  1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.

  3. Submit a DeleteSqlInjectionMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple objects.

If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule.

To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following steps:

  1. Update the SqlInjectionMatchSet to remove filters, if any. For more information, see UpdateSqlInjectionMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet request.

  3. Submit a DeleteSqlInjectionMatchSet request.

" }, "DeleteWebACL":{ "name":"DeleteWebACL", @@ -484,7 +501,7 @@ {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} ], - "documentation":"

Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

To delete a WebACL, perform the following steps:

  1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.

  3. Submit a DeleteWebACL request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes a WebACL. You can't delete a WebACL if it still contains any Rules.

To delete a WebACL, perform the following steps:

  1. Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteWebACL request.

  3. Submit a DeleteWebACL request.

" }, "DeleteXssMatchSet":{ "name":"DeleteXssMatchSet", @@ -502,7 +519,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFNonEmptyEntityException"} ], - "documentation":"

Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's still used in any Rules or if it still contains any XssMatchTuple objects.

If you just want to remove an XssMatchSet from a Rule, use UpdateRule.

To permanently delete an XssMatchSet from AWS WAF, perform the following steps:

  1. Update the XssMatchSet to remove filters, if any. For more information, see UpdateXssMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteXssMatchSet request.

  3. Submit a DeleteXssMatchSet request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's still used in any Rules or if it still contains any XssMatchTuple objects.

If you just want to remove an XssMatchSet from a Rule, use UpdateRule.

To permanently delete an XssMatchSet from AWS WAF, perform the following steps:

  1. Update the XssMatchSet to remove filters, if any. For more information, see UpdateXssMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of a DeleteXssMatchSet request.

  3. Submit a DeleteXssMatchSet request.

" }, "DisassociateWebACL":{ "name":"DisassociateWebACL", @@ -518,7 +535,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Removes a web ACL from the specified resource, either an application load balancer or Amazon API Gateway stage.

" + "documentation":"

This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Removes a web ACL from the specified resource, either an application load balancer or Amazon API Gateway stage.

" }, "GetByteMatchSet":{ "name":"GetByteMatchSet", @@ -533,7 +550,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the ByteMatchSet specified by ByteMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the ByteMatchSet specified by ByteMatchSetId.

" }, "GetChangeToken":{ "name":"GetChangeToken", @@ -546,7 +563,7 @@ "errors":[ {"shape":"WAFInternalErrorException"} ], - "documentation":"

When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

When you want to create, update, or delete AWS WAF objects, get a change token and include the change token in the create, update, or delete request. Change tokens ensure that your application doesn't submit conflicting requests to AWS WAF.

Each create, update, or delete request must use a unique change token. If your application submits a GetChangeToken request and then submits a second GetChangeToken request before submitting a create, update, or delete request, the second GetChangeToken request returns the same value as the first GetChangeToken request.

When you use a change token in a create, update, or delete request, the status of the change token changes to PENDING, which indicates that AWS WAF is propagating the change to all AWS WAF servers. Use GetChangeTokenStatus to determine the status of your change token.

" }, "GetChangeTokenStatus":{ "name":"GetChangeTokenStatus", @@ -560,7 +577,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

  • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.

  • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.

  • INSYNC: Propagation is complete.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the status of a ChangeToken that you got by calling GetChangeToken. ChangeTokenStatus is one of the following values:

  • PROVISIONED: You requested the change token by calling GetChangeToken, but you haven't used it yet in a call to create, update, or delete an AWS WAF object.

  • PENDING: AWS WAF is propagating the create, update, or delete request to all AWS WAF servers.

  • INSYNC: Propagation is complete.

" }, "GetGeoMatchSet":{ "name":"GetGeoMatchSet", @@ -575,7 +592,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the GeoMatchSet that is specified by GeoMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the GeoMatchSet that is specified by GeoMatchSetId.

" }, "GetIPSet":{ "name":"GetIPSet", @@ -590,7 +607,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the IPSet that is specified by IPSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the IPSet that is specified by IPSetId.

" }, "GetLoggingConfiguration":{ "name":"GetLoggingConfiguration", @@ -604,7 +621,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the LoggingConfiguration for the specified web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the LoggingConfiguration for the specified web ACL.

" }, "GetPermissionPolicy":{ "name":"GetPermissionPolicy", @@ -618,7 +635,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the IAM policy attached to the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the IAM policy attached to the RuleGroup.

" }, "GetRateBasedRule":{ "name":"GetRateBasedRule", @@ -633,7 +650,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RateBasedRule that is specified by the RuleId that you included in the GetRateBasedRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RateBasedRule that is specified by the RuleId that you included in the GetRateBasedRule request.

" }, "GetRateBasedRuleManagedKeys":{ "name":"GetRateBasedRuleManagedKeys", @@ -649,7 +666,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of IP addresses currently being blocked by the RateBasedRule that is specified by the RuleId. The maximum number of managed keys that will be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the 10,000 addresses with the highest rates will be blocked.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of IP addresses currently being blocked by the RateBasedRule that is specified by the RuleId. The maximum number of managed keys that will be blocked is 10,000. If more than 10,000 addresses exceed the rate limit, the 10,000 addresses with the highest rates will be blocked.

" }, "GetRegexMatchSet":{ "name":"GetRegexMatchSet", @@ -664,7 +681,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RegexMatchSet specified by RegexMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RegexMatchSet specified by RegexMatchSetId.

" }, "GetRegexPatternSet":{ "name":"GetRegexPatternSet", @@ -679,7 +696,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RegexPatternSet specified by RegexPatternSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RegexPatternSet specified by RegexPatternSetId.

" }, "GetRule":{ "name":"GetRule", @@ -694,7 +711,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the Rule that is specified by the RuleId that you included in the GetRule request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the Rule that is specified by the RuleId that you included in the GetRule request.

" }, "GetRuleGroup":{ "name":"GetRuleGroup", @@ -708,7 +725,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the RuleGroup that is specified by the RuleGroupId that you included in the GetRuleGroup request.

To view the rules in a rule group, use ListActivatedRulesInRuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the RuleGroup that is specified by the RuleGroupId that you included in the GetRuleGroup request.

To view the rules in a rule group, use ListActivatedRulesInRuleGroup.

" }, "GetSampledRequests":{ "name":"GetSampledRequests", @@ -722,7 +739,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.

GetSampledRequests returns a time range, which is usually the time range that you specified. However, if your resource (such as a CloudFront distribution) received 5,000 requests before the specified time range elapsed, GetSampledRequests returns an updated time range. This new time range indicates the actual period during which AWS WAF selected the requests in the sample.

" }, "GetSizeConstraintSet":{ "name":"GetSizeConstraintSet", @@ -737,7 +754,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the SizeConstraintSet specified by SizeConstraintSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the SizeConstraintSet specified by SizeConstraintSetId.

" }, "GetSqlInjectionMatchSet":{ "name":"GetSqlInjectionMatchSet", @@ -752,7 +769,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId.

" }, "GetWebACL":{ "name":"GetWebACL", @@ -767,7 +784,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the WebACL that is specified by WebACLId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the WebACL that is specified by WebACLId.

" }, "GetWebACLForResource":{ "name":"GetWebACLForResource", @@ -784,7 +801,7 @@ {"shape":"WAFInvalidParameterException"}, {"shape":"WAFUnavailableEntityException"} ], - "documentation":"

Returns the web ACL for the specified resource, either an application load balancer or Amazon API Gateway stage.

" + "documentation":"

This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the web ACL for the specified resource, either an application load balancer or Amazon API Gateway stage.

" }, "GetXssMatchSet":{ "name":"GetXssMatchSet", @@ -799,7 +816,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFNonexistentItemException"} ], - "documentation":"

Returns the XssMatchSet that is specified by XssMatchSetId.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns the XssMatchSet that is specified by XssMatchSetId.

" }, "ListActivatedRulesInRuleGroup":{ "name":"ListActivatedRulesInRuleGroup", @@ -814,7 +831,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of ActivatedRule objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of ActivatedRule objects.

" }, "ListByteMatchSets":{ "name":"ListByteMatchSets", @@ -828,7 +845,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of ByteMatchSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of ByteMatchSetSummary objects.

" }, "ListGeoMatchSets":{ "name":"ListGeoMatchSets", @@ -842,7 +859,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of GeoMatchSetSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of GeoMatchSetSummary objects in the response.

" }, "ListIPSets":{ "name":"ListIPSets", @@ -856,7 +873,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of IPSetSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of IPSetSummary objects in the response.

" }, "ListLoggingConfigurations":{ "name":"ListLoggingConfigurations", @@ -871,7 +888,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of LoggingConfiguration objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of LoggingConfiguration objects.

" }, "ListRateBasedRules":{ "name":"ListRateBasedRules", @@ -885,7 +902,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RuleSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleSummary objects.

" }, "ListRegexMatchSets":{ "name":"ListRegexMatchSets", @@ -899,7 +916,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RegexMatchSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RegexMatchSetSummary objects.

" }, "ListRegexPatternSets":{ "name":"ListRegexPatternSets", @@ -913,7 +930,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RegexPatternSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RegexPatternSetSummary objects.

" }, "ListResourcesForWebACL":{ "name":"ListResourcesForWebACL", @@ -929,7 +946,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Returns an array of resources associated with the specified web ACL.

" + "documentation":"

This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of resources associated with the specified web ACL.

" }, "ListRuleGroups":{ "name":"ListRuleGroups", @@ -942,7 +959,7 @@ "errors":[ {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns an array of RuleGroup objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleGroup objects.

" }, "ListRules":{ "name":"ListRules", @@ -956,7 +973,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of RuleSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleSummary objects.

" }, "ListSizeConstraintSets":{ "name":"ListSizeConstraintSets", @@ -970,7 +987,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of SizeConstraintSetSummary objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of SizeConstraintSetSummary objects.

" }, "ListSqlInjectionMatchSets":{ "name":"ListSqlInjectionMatchSets", @@ -984,7 +1001,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of SqlInjectionMatchSet objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of SqlInjectionMatchSet objects.

" }, "ListSubscribedRuleGroups":{ "name":"ListSubscribedRuleGroups", @@ -998,7 +1015,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInternalErrorException"} ], - "documentation":"

Returns an array of RuleGroup objects that you are subscribed to.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of RuleGroup objects that you are subscribed to.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1015,7 +1032,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Retrieves the tags associated with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "ListWebACLs":{ "name":"ListWebACLs", @@ -1029,7 +1047,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of WebACLSummary objects in the response.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of WebACLSummary objects in the response.

" }, "ListXssMatchSets":{ "name":"ListXssMatchSets", @@ -1043,7 +1061,7 @@ {"shape":"WAFInternalErrorException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Returns an array of XssMatchSet objects.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returns an array of XssMatchSet objects.

" }, "PutLoggingConfiguration":{ "name":"PutLoggingConfiguration", @@ -1059,7 +1077,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFServiceLinkedRoleErrorException"} ], - "documentation":"

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Associates a LoggingConfiguration with a specified web ACL.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the region that you are operating. However, if you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -1075,7 +1093,7 @@ {"shape":"WAFNonexistentItemException"}, {"shape":"WAFInvalidPermissionPolicyException"} ], - "documentation":"

Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Attaches an IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.

The PutPermissionPolicy is subject to the following restrictions:

  • You can attach only one policy with each PutPermissionPolicy request.

  • The policy must include an Effect, Action and Principal.

  • Effect must specify Allow.

  • The Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be rejected.

  • The policy cannot include a Resource parameter.

  • The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.

  • The user making the request must be the owner of the RuleGroup.

  • Your policy must be composed using IAM Policy version 2012-10-17.

For more information, see IAM Policies.

An example of a valid policy parameter is shown in the Examples section below.

" }, "TagResource":{ "name":"TagResource", @@ -1093,7 +1111,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can use this action to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "UntagResource":{ "name":"UntagResource", @@ -1110,7 +1129,8 @@ {"shape":"WAFBadRequestException"}, {"shape":"WAFTagOperationException"}, {"shape":"WAFTagOperationInternalErrorException"} - ] + ], + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

" }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", @@ -1130,7 +1150,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For each ByteMatchTuple object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a ByteMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to look for. For more information, including how you specify the values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data type.

  • Where to look, such as at the beginning or the end of a query string.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can add a ByteMatchSetUpdate object that matches web requests in which User-Agent headers contain the string BadBot. You can then configure AWS WAF to block those requests.

To create and configure a ByteMatchSet, perform the following steps:

  1. Create a ByteMatchSet. For more information, see CreateByteMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateByteMatchSet request.

  3. Submit an UpdateByteMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateGeoMatchSet":{ "name":"UpdateGeoMatchSet", @@ -1151,7 +1171,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For each GeoMatchConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an GeoMatchConstraint object, you delete the existing object and add a new one.

  • The Type. The only valid value for Type is Country.

  • The Value, which is a two character code for the country to add to the GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value.

To create and configure an GeoMatchSet, perform the following steps:

  1. Submit a CreateGeoMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  3. Submit an UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch for.

When you update an GeoMatchSet, you specify the country that you want to add and/or the country that you want to delete. If you want to change a country, you delete the existing country and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes GeoMatchConstraint objects in a GeoMatchSet. For each GeoMatchConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a GeoMatchConstraint object, you delete the existing object and add a new one.

  • The Type. The only valid value for Type is Country.

  • The Value, which is a two character code for the country to add to the GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value.

To create and configure a GeoMatchSet, perform the following steps:

  1. Submit a CreateGeoMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateGeoMatchSet request.

  3. Submit an UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch for.

When you update a GeoMatchSet, you specify the country that you want to add and/or the country that you want to delete. If you want to change a country, you delete the existing country and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateIPSet":{ "name":"UpdateIPSet", @@ -1172,7 +1192,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change an IPSetDescriptor object, you delete the existing object and add a new one.

  • The IP address version, IPv4 or IPv6.

  • The IP address in CIDR notation, for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44).

AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

IPv6 addresses can be represented using any of the following formats:

  • 1111:0000:0000:0000:0000:0000:0000:0111/128

  • 1111:0:0:0:0:0:0:0111/128

  • 1111::0111/128

  • 1111::111/128

You use an IPSet to specify which web requests you want to allow or block based on the IP addresses that the requests originated from. For example, if you're receiving a lot of requests from one or a small number of IP addresses and you want to block the requests, you can create an IPSet that specifies those IP addresses, and then configure AWS WAF to block the requests.

To create and configure an IPSet, perform the following steps:

  1. Submit a CreateIPSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch for.

When you update an IPSet, you specify the IP addresses that you want to add and/or the IP addresses that you want to delete. If you want to change an IP address, you delete the existing IP address and add the new one.

You can insert a maximum of 1000 addresses in a single request.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRateBasedRule":{ "name":"UpdateRateBasedRule", @@ -1193,7 +1213,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes Predicate objects in a rule and updates the RateLimit in the rule.

Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to block or count. The RateLimit specifies the number of requests every five minutes that triggers the rule.

If you add more than one predicate to a RateBasedRule, a request must match all the predicates and exceed the RateLimit to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 15,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that satisfy the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions much be received at a rate of more than 15,000 every five minutes. If the rate drops below this limit, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 15,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes Predicate objects in a rule and updates the RateLimit in the rule.

Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to block or count. The RateLimit specifies the number of requests every five minutes that triggers the rule.

If you add more than one predicate to a RateBasedRule, a request must match all the predicates and exceed the RateLimit to be counted or blocked. For example, suppose you add the following to a RateBasedRule:

  • An IPSet that matches the IP address 192.0.2.44/32

  • A ByteMatchSet that matches BadBot in the User-Agent header

Further, you specify a RateLimit of 1,000.

You then add the RateBasedRule to a WebACL and specify that you want to block requests that satisfy the rule. For a request to be blocked, it must come from the IP address 192.0.2.44 and the User-Agent header in the request must contain the value BadBot. Further, requests that match these two conditions must be received at a rate of more than 1,000 every five minutes. If the rate drops below this limit, AWS WAF no longer blocks the requests.

As a second example, suppose you want to limit requests to a particular page on your site. To do this, you could add the following to a RateBasedRule:

  • A ByteMatchSet with FieldToMatch of URI

  • A PositionalConstraint of STARTS_WITH

  • A TargetString of login

Further, you specify a RateLimit of 1,000.

By adding this RateBasedRule to a WebACL, you could limit requests to your login page without affecting the rest of your site.

" }, "UpdateRegexMatchSet":{ "name":"UpdateRegexMatchSet", @@ -1213,7 +1233,7 @@ {"shape":"WAFInvalidOperationException"}, {"shape":"WAFInvalidAccountException"} ], - "documentation":"

Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a RegexMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspectupdate, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can create a RegexPatternSet that matches any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Create a RegexMatchSet. For more information, see CreateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  3. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the RegexPatternSet that contain the regular expression patters you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a RegexMatchSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

For example, you can create a RegexPatternSet that matches any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

To create and configure a RegexMatchSet, perform the following steps:

  1. Create a RegexMatchSet. For more information, see CreateRegexMatchSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet request.

  3. Submit an UpdateRegexMatchSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the identifier of the RegexPatternSet that contains the regular expression patterns you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRegexPatternSet":{ "name":"UpdateRegexPatternSet", @@ -1233,7 +1253,7 @@ {"shape":"WAFInvalidAccountException"}, {"shape":"WAFInvalidRegexPatternException"} ], - "documentation":"

Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each RegexPatternString object, you specify the following values:

  • Whether to insert or delete the RegexPatternString.

  • The regular expression pattern that you want to insert or delete. For more information, see RegexPatternSet.

For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS WAF will match this RegexPatternString to:

  • BadBot

  • BadB0t

  • B@dBot

  • B@dB0t

To create and configure a RegexPatternSet, perform the following steps:

  1. Create a RegexPatternSet. For more information, see CreateRegexPatternSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  3. Submit an UpdateRegexPatternSet request to specify the regular expression pattern that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each RegexPatternString object, you specify the following values:

  • Whether to insert or delete the RegexPatternString.

  • The regular expression pattern that you want to insert or delete. For more information, see RegexPatternSet.

For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS WAF will match this RegexPatternString to:

  • BadBot

  • BadB0t

  • B@dBot

  • B@dB0t

To create and configure a RegexPatternSet, perform the following steps:

  1. Create a RegexPatternSet. For more information, see CreateRegexPatternSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRegexPatternSet request.

  3. Submit an UpdateRegexPatternSet request to specify the regular expression pattern that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRule":{ "name":"UpdateRule", @@ -1254,7 +1274,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a Rule:

  • A ByteMatchSet that matches the value BadBot in the User-Agent header

  • An IPSet that matches the IP address 192.0.2.44

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule.

  2. Create the Rule. See CreateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  4. Submit an UpdateRule request to add predicates to the Rule.

  5. Create and update a WebACL that contains the Rule. See CreateWebACL.

If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests that you want to allow, block, or count. If you add more than one predicate to a Rule, a request must match all of the specifications to be allowed, blocked, or counted. For example, suppose that you add the following to a Rule:

  • A ByteMatchSet that matches the value BadBot in the User-Agent header

  • An IPSet that matches the IP address 192.0.2.44

You then add the Rule to a WebACL and specify that you want to block requests that satisfy the Rule. For a request to be blocked, the User-Agent header in the request must contain the value BadBot and the request must originate from the IP address 192.0.2.44.

To create and configure a Rule, perform the following steps:

  1. Create and update the predicates that you want to include in the Rule.

  2. Create the Rule. See CreateRule.

  3. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRule request.

  4. Submit an UpdateRule request to add predicates to the Rule.

  5. Create and update a WebACL that contains the Rule. See CreateWebACL.

If you want to replace one ByteMatchSet or IPSet with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateRuleGroup":{ "name":"UpdateRuleGroup", @@ -1273,7 +1293,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFInvalidParameterException"} ], - "documentation":"

Inserts or deletes ActivatedRule objects in a RuleGroup.

You can only insert REGULAR rules into a rule group.

You can have a maximum of ten rules per rule group.

To create and configure a RuleGroup, perform the following steps:

  1. Create and update the Rules that you want to include in the RuleGroup. See CreateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRuleGroup request.

  3. Submit an UpdateRuleGroup request to add Rules to the RuleGroup.

  4. Create and update a WebACL that contains the RuleGroup. See CreateWebACL.

If you want to replace one Rule with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ActivatedRule objects in a RuleGroup.

You can only insert REGULAR rules into a rule group.

You can have a maximum of ten rules per rule group.

To create and configure a RuleGroup, perform the following steps:

  1. Create and update the Rules that you want to include in the RuleGroup. See CreateRule.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateRuleGroup request.

  3. Submit an UpdateRuleGroup request to add Rules to the RuleGroup.

  4. Create and update a WebACL that contains the RuleGroup. See CreateWebACL.

If you want to replace one Rule with another, you delete the existing one and add the new one.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSizeConstraintSet":{ "name":"UpdateSizeConstraintSet", @@ -1294,7 +1314,7 @@ {"shape":"WAFReferencedItemException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in the selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. For each SizeConstraint object, you specify the following values:

  • Whether to insert or delete the object from the array. If you want to change a SizeConstraintSetUpdate object, you delete the existing object and add a new one.

  • The part of a web request that you want AWS WAF to evaluate, such as the length of a query string or the length of the User-Agent header.

  • Whether to perform any transformations on the request, such as converting it to lowercase, before checking its length. Note that transformations of the request body are not supported because the AWS resource forwards only the first 8192 bytes of your request to AWS WAF.

    You can only specify a single type of TextTransformation.

  • A ComparisonOperator used for evaluating the selected part of the request against the specified Size, such as equals, greater than, less than, and so on.

  • The length, in bytes, that you want AWS WAF to watch for in the selected part of the request. The length is computed after applying the transformation.

For example, you can add a SizeConstraintSetUpdate object that matches web requests in which the length of the User-Agent header is greater than 100 bytes. You can then configure AWS WAF to block those requests.

To create and configure a SizeConstraintSet, perform the following steps:

  1. Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet request.

  3. Submit an UpdateSizeConstraintSet request to specify the part of the request that you want AWS WAF to inspect (for example, the header or the URI) and the value that you want AWS WAF to watch for.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateSqlInjectionMatchSet":{ "name":"UpdateSqlInjectionMatchSet", @@ -1314,7 +1334,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change a SqlInjectionMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for snippets of malicious SQL code.

    You can only specify a single type of TextTransformation.

You use SqlInjectionMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain snippets of SQL code in the query string and you want to block the requests, you can create a SqlInjectionMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure a SqlInjectionMatchSet, perform the following steps:

  1. Submit a CreateSqlInjectionMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateSqlInjectionMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for snippets of SQL code.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateWebACL":{ "name":"UpdateWebACL", @@ -1336,7 +1356,7 @@ {"shape":"WAFLimitsExceededException"}, {"shape":"WAFSubscriptionNotFoundException"} ], - "documentation":"

Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

  • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.

  • The Rules that you want to add or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.

  • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.

  • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.

To create and configure a WebACL, perform the following steps:

  1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Create a WebACL. See CreateWebACL.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    The ActivatedRule can be a rule group. If you specify a rule group as your ActivatedRule, you can exclude specific rules from that rule group.

    If you already have a rule group associated with a web ACL and want to submit an UpdateWebACL request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, then re-insert it, specifying the excluded rules. For details, see ActivatedRule$ExcludedRules.

Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the UpdateWebACL request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies web requests that you want to allow, block, or count. When you update a WebACL, you specify the following values:

  • A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't match the criteria in any of the Rules in a WebACL.

  • The Rules that you want to add or delete. If you want to replace one Rule with another, you delete the existing Rule and add the new one.

  • For each Rule, whether you want AWS WAF to allow requests, block requests, or count requests that match the conditions in the Rule.

  • The order in which you want AWS WAF to evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each request against the Rules in order based on the value of Priority. (The Rule that has the lowest value for Priority is evaluated first.) When a web request matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF immediately takes the corresponding action, allow or block, and doesn't evaluate the request against the remaining Rules in the WebACL, if any.

To create and configure a WebACL, perform the following steps:

  1. Create and update the predicates that you want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.

  2. Create and update the Rules that you want to include in the WebACL. For more information, see CreateRule and UpdateRule.

  3. Create a WebACL. See CreateWebACL.

  4. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateWebACL request.

  5. Submit an UpdateWebACL request to specify the Rules that you want to include in the WebACL, to specify the default action, and to associate the WebACL with a CloudFront distribution.

    The ActivatedRule can be a rule group. If you specify a rule group as your ActivatedRule, you can exclude specific rules from that rule group.

    If you already have a rule group associated with a web ACL and want to submit an UpdateWebACL request to exclude certain rules from that rule group, you must first remove the rule group from the web ACL, then re-insert it, specifying the excluded rules. For details, see ActivatedRule$ExcludedRules.

Be aware that if you try to add a RATE_BASED rule to a web ACL without setting the rule type when first creating the rule, the UpdateWebACL request will fail because the request tries to add a REGULAR rule (the default rule type) with the specified ID, which does not exist.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" }, "UpdateXssMatchSet":{ "name":"UpdateXssMatchSet", @@ -1356,7 +1376,7 @@ {"shape":"WAFStaleDataException"}, {"shape":"WAFLimitsExceededException"} ], - "documentation":"

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change an XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For each XssMatchTuple object, you specify the following values:

  • Action: Whether to insert the object into or delete the object from the array. To change an XssMatchTuple, you delete the existing object and add a new one.

  • FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or custom query parameter, the name of the header or parameter.

  • TextTransformation: Which text transformation, if any, to perform on the web request before inspecting the request for cross-site scripting attacks.

    You can only specify a single type of TextTransformation.

You use XssMatchSet objects to specify which CloudFront requests you want to allow, block, or count. For example, if you're receiving requests that contain cross-site scripting attacks in the request body and you want to block the requests, you can create an XssMatchSet with the applicable settings, and then configure AWS WAF to block the requests.

To create and configure an XssMatchSet, perform the following steps:

  1. Submit a CreateXssMatchSet request.

  2. Use GetChangeToken to get the change token that you provide in the ChangeToken parameter of an UpdateIPSet request.

  3. Submit an UpdateXssMatchSet request to specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks.

For more information about how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide.

" } }, "shapes":{ @@ -1393,7 +1413,7 @@ "documentation":"

An array of rules to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup.

Sometimes it is necessary to troubleshoot rule groups that are blocking traffic unexpectedly (false positives). One troubleshooting technique is to identify the specific rule within the rule group that is blocking the legitimate traffic and then disable (exclude) that particular rule. You can exclude rules from both your own rule groups and AWS Marketplace rule groups that have been associated with a web ACL.

Specifying ExcludedRules does not remove those rules from the rule group. Rather, it changes the action for the rules to COUNT. Therefore, requests that match an ExcludedRule are counted but not blocked. The RuleGroup owner will receive COUNT metrics for each ExcludedRule.

If you want to exclude rules from a rule group that is already associated with a web ACL, perform the following steps:

  1. Use the AWS WAF logs to identify the IDs of the rules that you want to exclude. For more information about the logs, see Logging Web ACL Traffic Information.

  2. Submit an UpdateWebACL request that has two actions:

    • The first action deletes the existing rule group from the web ACL. That is, in the UpdateWebACL request, the first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be the rule group that contains the rules that you want to exclude.

    • The second action inserts the same rule group back in, but specifying the rules to exclude. That is, the second Updates:Action should be INSERT, Updates:ActivatedRule:RuleId should be the rule group that you just removed, and ExcludedRules should contain the rules that you want to exclude.

" } }, - "documentation":"

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

To specify whether to insert or delete a Rule, use the Action parameter in the WebACLUpdate data type.

" }, "ActivatedRules":{ "type":"list", @@ -1441,7 +1461,7 @@ "documentation":"

Specifies the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" } }, - "documentation":"

In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetByteMatchSet request, ByteMatchSet is a complex type that contains the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified when you updated the ByteMatchSet.

A complex type that contains ByteMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" }, "ByteMatchSetSummaries":{ "type":"list", @@ -1463,7 +1483,7 @@ "documentation":"

A friendly name or description of the ByteMatchSet. You can't change Name after you create a ByteMatchSet.

" } }, - "documentation":"

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the Name and ByteMatchSetId for one ByteMatchSet.

" }, "ByteMatchSetUpdate":{ "type":"structure", @@ -1481,7 +1501,7 @@ "documentation":"

Information about the part of a web request that you want AWS WAF to inspect and the value that you want AWS WAF to search for. If you specify DELETE for the value of Action, the ByteMatchTuple values must exactly match the values in the ByteMatchTuple that you want to delete from the ByteMatchSet.

" } }, - "documentation":"

In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple.

" }, "ByteMatchSetUpdates":{ "type":"list", @@ -1508,14 +1528,14 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on TargetString before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces &quot; with \"

  • Replaces &nbsp; with a non-breaking space, decimal 160

  • Replaces &lt; with a \"less than\" symbol

  • Replaces &gt; with >

  • Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" }, "PositionalConstraint":{ "shape":"PositionalConstraint", "documentation":"

Within the portion of a web request that you want to search (for example, in the query string, if any), specify where you want AWS WAF to search. Valid values include the following:

CONTAINS

The specified part of the web request must include the value of TargetString, but the location doesn't matter.

CONTAINS_WORD

The specified part of the web request must include the value of TargetString, and TargetString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means one of the following:

  • TargetString exactly matches the value of the specified part of the web request, such as the value of a header.

  • TargetString is at the beginning of the specified part of the web request and is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot;.

  • TargetString is at the end of the specified part of the web request and is preceded by a character other than an alphanumeric character or underscore (_), for example, ;BadBot.

  • TargetString is in the middle of the specified part of the web request and is preceded and followed by characters other than alphanumeric characters or underscore (_), for example, -BadBot;.

EXACTLY

The value of the specified part of the web request must exactly match the value of TargetString.

STARTS_WITH

The value of TargetString must appear at the beginning of the specified part of the web request.

ENDS_WITH

The value of TargetString must appear at the end of the specified part of the web request.

" } }, - "documentation":"

The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings.

" }, "ByteMatchTuples":{ "type":"list", @@ -1530,7 +1550,9 @@ }, "ChangeToken":{ "type":"string", - "min":1 + "max":128, + "min":1, + "pattern":".*\\S.*" }, "ChangeTokenStatus":{ "type":"string", @@ -1672,7 +1694,10 @@ "shape":"ChangeToken", "documentation":"

The ChangeToken that you used to submit the CreateRateBasedRule request. You can also use this value to query the status of the request. For more information, see GetChangeTokenStatus.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRateBasedRuleResponse":{ @@ -1768,7 +1793,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRuleGroupResponse":{ @@ -1804,7 +1832,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateRuleResponse":{ @@ -1882,6 +1913,38 @@ }, "documentation":"

The response to a CreateSqlInjectionMatchSet request.

" }, + "CreateWebACLMigrationStackRequest":{ + "type":"structure", + "required":[ + "WebACLId", + "S3BucketName", + "IgnoreUnsupportedType" + ], + "members":{ + "WebACLId":{ + "shape":"ResourceId", + "documentation":"

The UUID of the WAF Classic web ACL that you want to migrate to WAF v2.

" + }, + "S3BucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 bucket must be configured as follows for the migration:

  • The bucket name must start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl.

  • The bucket must be in the Region where you are deploying the template. For example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and you must deploy the template stack to us-west-2.

  • The bucket policies must permit the migration process to write data. For listings of the bucket policies, see the Examples section.

" + }, + "IgnoreUnsupportedType":{ + "shape":"IgnoreUnsupportedType", + "documentation":"

Indicates whether to exclude entities that can't be migrated or to stop the migration. Set this to true to ignore unsupported entities in the web ACL during the migration. Otherwise, if AWS WAF encounters unsupported entities, it stops the process and throws an exception.

" + } + } + }, + "CreateWebACLMigrationStackResponse":{ + "type":"structure", + "required":["S3ObjectUrl"], + "members":{ + "S3ObjectUrl":{ + "shape":"S3ObjectUrl", + "documentation":"

The URL of the template created in Amazon S3.

" + } + } + }, "CreateWebACLRequest":{ "type":"structure", "required":[ @@ -1907,7 +1970,10 @@ "shape":"ChangeToken", "documentation":"

The value returned by the most recent call to GetChangeToken.

" }, - "Tags":{"shape":"TagList"} + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "CreateWebACLResponse":{ @@ -2316,6 +2382,7 @@ "members":{ } }, + "ErrorReason":{"type":"string"}, "ExcludedRule":{ "type":"structure", "required":["RuleId"], @@ -2325,7 +2392,7 @@ "documentation":"

The unique identifier for the rule to exclude from the rule group.

" } }, - "documentation":"

The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup. The rule must belong to the RuleGroup that is specified by the ActivatedRule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The rule to exclude from a rule group. This is applicable only when the ActivatedRule refers to a RuleGroup. The rule must belong to the RuleGroup that is specified by the ActivatedRule.

" }, "ExcludedRules":{ "type":"list", @@ -2344,7 +2411,7 @@ "documentation":"

When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. The name of the header is not case sensitive.

When the value of Type is SINGLE_QUERY_ARG, enter the name of the parameter that you want AWS WAF to search, for example, UserName or SalesRegion. The parameter name is not case sensitive.

If the value of Type is any other value, omit Data.

" } }, - "documentation":"

Specifies where in a web request to look for TargetString.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies where in a web request to look for TargetString.

" }, "GeoMatchConstraint":{ "type":"structure", @@ -2362,7 +2429,7 @@ "documentation":"

The country that you want AWS WAF to search for.

" } }, - "documentation":"

The country from which web requests originate that you want AWS WAF to search for.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The country from which web requests originate that you want AWS WAF to search for.

" }, "GeoMatchConstraintType":{ "type":"string", @@ -2646,7 +2713,7 @@ "documentation":"

An array of GeoMatchConstraint objects, which contain the country that you want AWS WAF to search for.

" } }, - "documentation":"

Contains one or more countries that AWS WAF will search for.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains one or more countries that AWS WAF will search for.

" }, "GeoMatchSetSummaries":{ "type":"list", @@ -2668,7 +2735,7 @@ "documentation":"

A friendly name or description of the GeoMatchSet. You can't change the name of a GeoMatchSet after you create it.

" } }, - "documentation":"

Contains the identifier and the name of the GeoMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name of the GeoMatchSet.

" }, "GeoMatchSetUpdate":{ "type":"structure", @@ -2686,7 +2753,7 @@ "documentation":"

The country from which web requests originate that you want AWS WAF to search for.

" } }, - "documentation":"

Specifies the type of update to perform to an GeoMatchSet with UpdateGeoMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the type of update to perform to a GeoMatchSet with UpdateGeoMatchSet.

" }, "GeoMatchSetUpdates":{ "type":"list", @@ -2967,7 +3034,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "MaxItems":{ "shape":"GetSampledRequestsMaxItems", @@ -2988,7 +3055,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.

" + "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests. Times are in Coordinated Universal Time (UTC) format.

" } } }, @@ -3103,7 +3170,7 @@ "documentation":"

The value of one of the headers in the sampled web request.

" } }, - "documentation":"

The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes an HTTPHeader complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests that were returned by GetSampledRequests.

" }, "HTTPHeaders":{ "type":"list", @@ -3138,7 +3205,7 @@ "documentation":"

A complex type that contains two values for each header in the sampled web request: the name of the header and the value of the header.

" } }, - "documentation":"

The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes an HTTPRequest complex type that appears as Request in the response syntax. HTTPRequest contains information about one of the web requests that were returned by GetSampledRequests.

" }, "HTTPVersion":{"type":"string"}, "HeaderName":{"type":"string"}, @@ -3163,7 +3230,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from. If the WebACL is associated with a CloudFront distribution and the viewer did not use an HTTP proxy or a load balancer to send the request, this is the value of the c-ip field in the CloudFront access logs.

" } }, - "documentation":"

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128.

To specify an individual IP address, you specify the four-part IP address followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses, you can specify /8 or any range between /16 through /32 (for IPv4) or /24, /32, /48, /56, /64, or /128 (for IPv6). For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

" }, "IPSetDescriptor":{ "type":"structure", @@ -3181,7 +3248,7 @@ "documentation":"

Specify an IPv4 address by using CIDR notation. For example:

  • To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32.

  • To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.

For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

Specify an IPv6 address by using CIDR notation. For example:

  • To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.

  • To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64.

" } }, - "documentation":"

Specifies the IP address type (IPV4 or IPV6) and the IP address range (in CIDR format) that web requests originate from.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the IP address type (IPV4 or IPV6) and the IP address range (in CIDR format) that web requests originate from.

" }, "IPSetDescriptorType":{ "type":"string", @@ -3190,7 +3257,12 @@ "IPV6" ] }, - "IPSetDescriptorValue":{"type":"string"}, + "IPSetDescriptorValue":{ + "type":"string", + "max":50, + "min":1, + "pattern":".*\\S.*" + }, "IPSetDescriptors":{ "type":"list", "member":{"shape":"IPSetDescriptor"} @@ -3215,7 +3287,7 @@ "documentation":"

A friendly name or description of the IPSet. You can't change the name of an IPSet after you create it.

" } }, - "documentation":"

Contains the identifier and the name of the IPSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name of the IPSet.

" }, "IPSetUpdate":{ "type":"structure", @@ -3233,7 +3305,7 @@ "documentation":"

The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) that web requests originate from.

" } }, - "documentation":"

Specifies the type of update to perform to an IPSet with UpdateIPSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the type of update to perform to an IPSet with UpdateIPSet.

" }, "IPSetUpdates":{ "type":"list", @@ -3241,6 +3313,7 @@ "min":1 }, "IPString":{"type":"string"}, + "IgnoreUnsupportedType":{"type":"boolean"}, "ListActivatedRulesInRuleGroupRequest":{ "type":"structure", "members":{ @@ -3612,16 +3685,31 @@ "type":"structure", "required":["ResourceARN"], "members":{ - "NextMarker":{"shape":"NextMarker"}, - "Limit":{"shape":"PaginationLimit"}, - "ResourceARN":{"shape":"ResourceArn"} + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

" + }, + "Limit":{ + "shape":"PaginationLimit", + "documentation":"

" + }, + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + } } }, "ListTagsForResourceResponse":{ "type":"structure", "members":{ - "NextMarker":{"shape":"NextMarker"}, - "TagInfoForResource":{"shape":"TagInfoForResource"} + "NextMarker":{ + "shape":"NextMarker", + "documentation":"

" + }, + "TagInfoForResource":{ + "shape":"TagInfoForResource", + "documentation":"

" + } } }, "ListWebACLsRequest":{ @@ -3704,7 +3792,7 @@ "documentation":"

The parts of the request that you want redacted from the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx.

" } }, - "documentation":"

The Amazon Kinesis Data Firehose, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Amazon Kinesis Data Firehose, RedactedFields information, and the web ACL Amazon Resource Name (ARN).

" }, "LoggingConfigurations":{ "type":"list", @@ -3715,7 +3803,12 @@ "type":"list", "member":{"shape":"ManagedKey"} }, - "MatchFieldData":{"type":"string"}, + "MatchFieldData":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, "MatchFieldType":{ "type":"string", "enum":[ @@ -3728,11 +3821,30 @@ "ALL_QUERY_ARGS" ] }, - "MetricName":{"type":"string"}, + "MetricName":{ + "type":"string", + "max":128, + "min":1, + "pattern":".*\\S.*" + }, + "MigrationErrorType":{ + "type":"string", + "enum":[ + "ENTITY_NOT_SUPPORTED", + "ENTITY_NOT_FOUND", + "S3_BUCKET_NO_PERMISSION", + "S3_BUCKET_NOT_ACCESSIBLE", + "S3_BUCKET_NOT_FOUND", + "S3_BUCKET_INVALID_REGION", + "S3_INTERNAL_ERROR" + ] + }, "Negated":{"type":"boolean"}, "NextMarker":{ "type":"string", - "min":1 + "max":1224, + "min":1, + "pattern":".*\\S.*" }, "PaginationLimit":{ "type":"integer", @@ -3777,7 +3889,9 @@ }, "PolicyString":{ "type":"string", - "min":1 + "max":395000, + "min":1, + "pattern":".*\\S.*" }, "PopulationSize":{"type":"long"}, "PositionalConstraint":{ @@ -3811,7 +3925,7 @@ "documentation":"

A unique identifier for a predicate in a Rule, such as ByteMatchSetId or IPSetId. The ID is returned by the corresponding Create or List command.

" } }, - "documentation":"

Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule and, for each object, indicates whether you want to negate the settings, for example, requests that do NOT originate from the IP address 192.0.2.44.

" }, "PredicateType":{ "type":"string", @@ -3904,7 +4018,7 @@ "documentation":"

The maximum number of requests, which have an identical value in the field specified by the RateKey, allowed in a five-minute period. If the number of requests exceeds the RateLimit and the other predicates specified in the rule are also met, AWS WAF triggers the action that is specified for this rule.

" } }, - "documentation":"

A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:

  • The requests come from 192.0.2.44.

  • They contain the value BadBot in the User-Agent header.

In the rule, you also define the rate limit as 15,000.

Requests that meet both of these conditions and exceed 15,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule counts the number of requests that arrive from a specified IP address every five minutes. For example, based on recent requests that you've seen from an attacker, you might create a RateBasedRule that includes the following conditions:

  • The requests come from 192.0.2.44.

  • They contain the value BadBot in the User-Agent header.

In the rule, you also define the rate limit as 1,000.

Requests that meet both of these conditions and exceed 1,000 requests every five minutes trigger the rule's action (block or count), which is defined in the web ACL.

" }, "RateKey":{ "type":"string", @@ -3935,7 +4049,7 @@ "documentation":"

Contains an array of RegexMatchTuple objects. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" } }, - "documentation":"

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specify the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one ByteMatchTuple to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains the RegexMatchSetId and Name of a RegexMatchSet, and the values that you specified when you updated the RegexMatchSet.

The values are contained in a RegexMatchTuple object, which specifies the parts of web requests that you want AWS WAF to inspect and the values that you want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple object, a request needs to match the settings in only one RegexMatchTuple to be considered a match.

" }, "RegexMatchSetSummaries":{ "type":"list", @@ -3957,7 +4071,7 @@ "documentation":"

A friendly name or description of the RegexMatchSet. You can't change Name after you create a RegexMatchSet.

" } }, - "documentation":"

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes the Name and RegexMatchSetId for one RegexMatchSet.

" }, "RegexMatchSetUpdate":{ "type":"structure", @@ -3975,7 +4089,7 @@ "documentation":"

Information about the part of a web request that you want AWS WAF to inspect and the identifier of the regular expression (regex) pattern that you want AWS WAF to search for. If you specify DELETE for the value of Action, the RegexMatchTuple values must exactly match the values in the RegexMatchTuple that you want to delete from the RegexMatchSet.

" } }, - "documentation":"

In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple.

" }, "RegexMatchSetUpdates":{ "type":"list", @@ -4003,7 +4117,7 @@ "documentation":"

The RegexPatternSetId for a RegexPatternSet. You use RegexPatternSetId to get information about a RegexPatternSet (see GetRegexPatternSet), update a RegexPatternSet (see UpdateRegexPatternSet), insert a RegexPatternSet into a RegexMatchSet or delete one from a RegexMatchSet (see UpdateRegexMatchSet), and delete an RegexPatternSet from AWS WAF (see DeleteRegexPatternSet).

RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets.

" } }, - "documentation":"

The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The regular expression pattern that you want AWS WAF to search for in web requests, the location in requests that you want AWS WAF to search, and other settings. Each RegexMatchTuple object contains:

  • The part of a web request that you want AWS WAF to inspect, such as a query string or the value of the User-Agent header.

  • The identifier of the pattern (a regular expression) that you want AWS WAF to look for. For more information, see RegexPatternSet.

  • Whether to perform any conversions on the request, such as converting it to lowercase, before inspecting it for the specified string.

" }, "RegexMatchTuples":{ "type":"list", @@ -4029,7 +4143,7 @@ "documentation":"

Specifies the regular expression (regex) patterns that you want AWS WAF to search for, such as B[a@]dB[o0]t.

" } }, - "documentation":"

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The RegexPatternSet specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests.

" }, "RegexPatternSetSummaries":{ "type":"list", @@ -4051,7 +4165,7 @@ "documentation":"

A friendly name or description of the RegexPatternSet. You can't change Name after you create a RegexPatternSet.

" } }, - "documentation":"

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes the Name and RegexPatternSetId for one RegexPatternSet.

" }, "RegexPatternSetUpdate":{ "type":"structure", @@ -4069,7 +4183,7 @@ "documentation":"

Specifies the regular expression (regex) pattern that you want AWS WAF to search for, such as B[a@]dB[o0]t.

" } }, - "documentation":"

In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether to insert or delete a RegexPatternString and includes the settings for the RegexPatternString.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether to insert or delete a RegexPatternString and includes the settings for the RegexPatternString.

" }, "RegexPatternSetUpdates":{ "type":"list", @@ -4078,7 +4192,9 @@ }, "RegexPatternString":{ "type":"string", - "min":1 + "max":512, + "min":1, + "pattern":".*" }, "RegexPatternStrings":{ "type":"list", @@ -4088,7 +4204,8 @@ "ResourceArn":{ "type":"string", "max":1224, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "ResourceArns":{ "type":"list", @@ -4097,12 +4214,14 @@ "ResourceId":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "ResourceName":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "ResourceType":{ "type":"string", @@ -4135,7 +4254,7 @@ "documentation":"

The Predicates object contains one Predicate element for each ByteMatchSet, IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.

" } }, - "documentation":"

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects that identify the web requests that you want to allow, block, or count. For example, you might create a Rule that includes the following predicates:

  • An IPSet that causes AWS WAF to search for web requests that originate from the IP address 192.0.2.44

  • A ByteMatchSet that causes AWS WAF to search for web requests for which the value of the User-Agent header is BadBot.

To match the settings in this Rule, a request must originate from 192.0.2.44 AND include a User-Agent header for which the value is BadBot.

" }, "RuleGroup":{ "type":"structure", @@ -4154,7 +4273,7 @@ "documentation":"

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including \"All\" and \"Default_Action.\" You can't change the name of the metric after you create the RuleGroup.

" } }, - "documentation":"

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A collection of predefined rules that you can add to a web ACL.

Rule groups are subject to the following limits:

  • Three rule groups per account. You can request an increase to this limit by contacting customer support.

  • One rule group per web ACL.

  • Ten rules per rule group.

" }, "RuleGroupSummaries":{ "type":"list", @@ -4176,7 +4295,7 @@ "documentation":"

A friendly name or description of the RuleGroup. You can't change the name of a RuleGroup after you create it.

" } }, - "documentation":"

Contains the identifier and the friendly name or description of the RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the friendly name or description of the RuleGroup.

" }, "RuleGroupUpdate":{ "type":"structure", @@ -4194,7 +4313,7 @@ "documentation":"

The ActivatedRule object specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

" } }, - "documentation":"

Specifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies an ActivatedRule and indicates whether you want to add it to a RuleGroup or delete it from a RuleGroup.

" }, "RuleGroupUpdates":{ "type":"list", @@ -4222,7 +4341,7 @@ "documentation":"

A friendly name or description of the Rule. You can't change the name of a Rule after you create it.

" } }, - "documentation":"

Contains the identifier and the friendly name or description of the Rule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the friendly name or description of the Rule.

" }, "RuleUpdate":{ "type":"structure", @@ -4240,12 +4359,22 @@ "documentation":"

The ID of the Predicate (such as an IPSet) that you want to add to a Rule.

" } }, - "documentation":"

Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies a Predicate (such as an IPSet) and indicates whether you want to add it to a Rule or delete it from a Rule.

" }, "RuleUpdates":{ "type":"list", "member":{"shape":"RuleUpdate"} }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^aws-waf-migration-[0-9A-Za-z\\.\\-_]*" + }, + "S3ObjectUrl":{ + "type":"string", + "min":1 + }, "SampleWeight":{ "type":"long", "min":0 @@ -4278,7 +4407,7 @@ "documentation":"

This value is returned if the GetSampledRequests request specifies the ID of a RuleGroup rather than the ID of an individual rule. RuleWithinRuleGroup is the rule within the specified RuleGroup that matched the request listed in the response.

" } }, - "documentation":"

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The response from a GetSampledRequests request includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains one SampledHTTPRequest object for each web request that is returned by GetSampledRequests.

" }, "SampledHTTPRequests":{ "type":"list", @@ -4304,7 +4433,7 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

Note that if you choose BODY for the value of Type, you must choose NONE for TextTransformation because CloudFront forwards only the first 8192 bytes for inspection.

NONE

Specify NONE if you don't want to perform any text transformations.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

" }, "ComparisonOperator":{ "shape":"ComparisonOperator", @@ -4315,7 +4444,7 @@ "documentation":"

The size in bytes that you want AWS WAF to compare against the size of the specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

Valid values for size are 0 - 21474836480 bytes (0 - 20 GB).

If you specify URI for the value of Type, the / in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.

" } }, - "documentation":"

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" }, "SizeConstraintSet":{ "type":"structure", @@ -4337,7 +4466,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect the size of.

" } }, - "documentation":"

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains SizeConstraint objects, which specify the parts of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet contains more than one SizeConstraint object, a request only needs to match one constraint to be considered a match.

" }, "SizeConstraintSetSummaries":{ "type":"list", @@ -4359,7 +4488,7 @@ "documentation":"

The name of the SizeConstraintSet, if any.

" } }, - "documentation":"

The Id and Name of a SizeConstraintSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of a SizeConstraintSet.

" }, "SizeConstraintSetUpdate":{ "type":"structure", @@ -4377,7 +4506,7 @@ "documentation":"

Specifies a constraint on the size of a part of the web request. AWS WAF uses the Size, ComparisonOperator, and FieldToMatch to build an expression in the form of \"Size ComparisonOperator size in bytes of FieldToMatch\". If that expression is true, the SizeConstraint is considered to match.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect the size of and indicates whether you want to add the specification to a SizeConstraintSet or delete it from a SizeConstraintSet.

" }, "SizeConstraintSetUpdates":{ "type":"list", @@ -4408,7 +4537,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect for snippets of malicious SQL code.

" } }, - "documentation":"

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains SqlInjectionMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple object, a request needs to include snippets of SQL code in only one of the specified parts of the request to be considered a match.

" }, "SqlInjectionMatchSetSummaries":{ "type":"list", @@ -4430,7 +4559,7 @@ "documentation":"

The name of the SqlInjectionMatchSet, if any, specified by Id.

" } }, - "documentation":"

The Id and Name of a SqlInjectionMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of a SqlInjectionMatchSet.

" }, "SqlInjectionMatchSetUpdate":{ "type":"structure", @@ -4448,7 +4577,7 @@ "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect for snippets of malicious SQL code and indicates whether you want to add the specification to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.

" }, "SqlInjectionMatchSetUpdates":{ "type":"list", @@ -4468,10 +4597,10 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, - "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want AWS WAF to inspect for snippets of malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.

" }, "SqlInjectionMatchTuples":{ "type":"list", @@ -4502,26 +4631,45 @@ "documentation":"

A friendly name or description for the metrics for this RuleGroup. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with maximum length 128 and minimum length one. It can't contain whitespace or metric names reserved for AWS WAF, including \"All\" and \"Default_Action.\" You can't change the name of the metric after you create the RuleGroup.

" } }, - "documentation":"

A summary of the rule groups you are subscribed to.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A summary of the rule groups you are subscribed to.

" }, "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ - "Key":{"shape":"TagKey"}, - "Value":{"shape":"TagValue"} - } + "Key":{ + "shape":"TagKey", + "documentation":"

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

" + } + }, + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "TagInfoForResource":{ "type":"structure", "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "TagList":{"shape":"TagList"} - } + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "TagList":{ + "shape":"TagList", + "documentation":"

" + } + }, + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Information for a tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

Tagging is only available through the API, SDKs, and CLI. You can't manage or view tags through the AWS WAF Classic console. You can tag the AWS resources that you manage through AWS WAF Classic: web ACLs, rule groups, and rules.

" }, "TagKey":{ "type":"string", "max":128, - "min":1 + "min":1, + "pattern":".*\\S.*" }, "TagKeyList":{ "type":"list", @@ -4540,8 +4688,14 @@ "Tags" ], "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "Tags":{"shape":"TagList"} + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

" + } } }, "TagResourceResponse":{ @@ -4552,7 +4706,8 @@ "TagValue":{ "type":"string", "max":256, - "min":0 + "min":0, + "pattern":".*" }, "TextTransformation":{ "type":"string", @@ -4574,14 +4729,14 @@ "members":{ "StartTime":{ "shape":"Timestamp", - "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the date and time in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the date and time in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" } }, - "documentation":"

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\".

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" }, "Timestamp":{"type":"timestamp"}, "URIString":{"type":"string"}, @@ -4592,8 +4747,14 @@ "TagKeys" ], "members":{ - "ResourceARN":{"shape":"ResourceArn"}, - "TagKeys":{"shape":"TagKeyList"} + "ResourceARN":{ + "shape":"ResourceArn", + "documentation":"

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

" + } } }, "UntagResourceResponse":{ @@ -4990,6 +5151,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true }, "WAFDisallowedNameException":{ @@ -5000,6 +5162,16 @@ "documentation":"

The name specified is invalid.

", "exception":true }, + "WAFEntityMigrationException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"}, + "MigrationErrorType":{"shape":"MigrationErrorType"}, + "MigrationErrorReason":{"shape":"ErrorReason"} + }, + "documentation":"

The operation failed due to a problem with the migration. The failure cause is provided in the exception, in the MigrationErrorType:

  • ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the IgnoreUnsupportedType is not set to true.

  • ENTITY_NOT_FOUND - The web ACL doesn't exist.

  • S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject action to the specified Amazon S3 bucket.

  • S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to perform the PutObject action in the bucket.

  • S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist.

  • S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as the web ACL.

  • S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 bucket for another reason.

", + "exception":true + }, "WAFInternalErrorException":{ "type":"structure", "members":{ @@ -5119,6 +5291,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true }, "WAFTagOperationInternalErrorException":{ @@ -5126,6 +5299,7 @@ "members":{ "message":{"shape":"errorMessage"} }, + "documentation":"

", "exception":true, "fault":true }, @@ -5146,7 +5320,7 @@ "documentation":"

Specifies how you want AWS WAF to respond to requests that match the settings in a Rule. Valid settings include the following:

  • ALLOW: AWS WAF allows requests

  • BLOCK: AWS WAF blocks requests

  • COUNT: AWS WAF increments a counter of the requests that match all of the conditions in the rule. AWS WAF then continues to inspect the web request based on the remaining rules in the web ACL. You can't specify COUNT for the default action for a WebACL.

" } }, - "documentation":"

For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

For the action that is associated with a rule in a WebACL, specifies the action that you want AWS WAF to perform when a web request matches all of the conditions in a rule. For the default action in a WebACL, specifies the action that you want AWS WAF to take when a web request doesn't match all of the conditions in any of the rules in a WebACL.

" }, "WafActionType":{ "type":"string", @@ -5165,7 +5339,7 @@ "documentation":"

COUNT overrides the action specified by the individual rule within a RuleGroup . If set to NONE, the rule's action will take place.

" } }, - "documentation":"

The action to take if any rule within the RuleGroup matches a request.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The action to take if any rule within the RuleGroup matches a request.

" }, "WafOverrideActionType":{ "type":"string", @@ -5215,7 +5389,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the web ACL.

" } }, - "documentation":"

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the Rules that identify the requests that you want to allow, block, or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), and the action for each Rule that you add to a WebACL, for example, block requests from specified IP addresses or block requests from specified referrers. You also associate the WebACL with a CloudFront distribution to identify the requests that you want AWS WAF to filter. If you add more than one Rule to a WebACL, a request needs to match only one of the specifications to be allowed, blocked, or counted. For more information, see UpdateWebACL.

" }, "WebACLSummaries":{ "type":"list", @@ -5237,7 +5411,7 @@ "documentation":"

A friendly name or description of the WebACL. You can't change the name of a WebACL after you create it.

" } }, - "documentation":"

Contains the identifier and the name or description of the WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Contains the identifier and the name or description of the WebACL.

" }, "WebACLUpdate":{ "type":"structure", @@ -5255,7 +5429,7 @@ "documentation":"

The ActivatedRule object in an UpdateWebACL request specifies a Rule that you want to insert or delete, the priority of the Rule in the WebACL, and the action that you want AWS WAF to take when a web request matches the Rule (ALLOW, BLOCK, or COUNT).

" } }, - "documentation":"

Specifies whether to insert a Rule into or delete a Rule from a WebACL.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies whether to insert a Rule into or delete a Rule from a WebACL.

" }, "WebACLUpdates":{ "type":"list", @@ -5281,7 +5455,7 @@ "documentation":"

Specifies the parts of web requests that you want to inspect for cross-site scripting attacks.

" } }, - "documentation":"

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If a XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

A complex type that contains XssMatchTuple objects, which specify the parts of web requests that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header. If a XssMatchSet contains more than one XssMatchTuple object, a request needs to include cross-site scripting attacks in only one of the specified parts of the request to be considered a match.

" }, "XssMatchSetSummaries":{ "type":"list", @@ -5303,7 +5477,7 @@ "documentation":"

The name of the XssMatchSet, if any, specified by Id.

" } }, - "documentation":"

The Id and Name of an XssMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

The Id and Name of an XssMatchSet.

" }, "XssMatchSetUpdate":{ "type":"structure", @@ -5321,7 +5495,7 @@ "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" } }, - "documentation":"

Specifies the part of a web request that you want to inspect for cross-site scripting attacks and indicates whether you want to add the specification to an XssMatchSet or delete it from an XssMatchSet.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want to inspect for cross-site scripting attacks and indicates whether you want to add the specification to an XssMatchSet or delete it from an XssMatchSet.

" }, "XssMatchSetUpdates":{ "type":"list", @@ -5341,10 +5515,10 @@ }, "TextTransformation":{ "shape":"TextTransformation", - "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting a request for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" + "documentation":"

Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass AWS WAF. If you specify a transformation, AWS WAF performs the transformation on FieldToMatch before inspecting it for a match.

You can only specify a single type of TextTransformation.

CMD_LINE

When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:

  • Delete the following characters: \\ \" ' ^

  • Delete spaces before the following characters: / (

  • Replace the following characters with a space: , ;

  • Replace multiple spaces with one space

  • Convert uppercase letters (A-Z) to lowercase (a-z)

COMPRESS_WHITE_SPACE

Use this option to replace the following characters with a space character (decimal 32):

  • \\f, formfeed, decimal 12

  • \\t, tab, decimal 9

  • \\n, newline, decimal 10

  • \\r, carriage return, decimal 13

  • \\v, vertical tab, decimal 11

  • non-breaking space, decimal 160

COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.

HTML_ENTITY_DECODE

Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:

  • Replaces (ampersand)quot; with \"

  • Replaces (ampersand)nbsp; with a non-breaking space, decimal 160

  • Replaces (ampersand)lt; with a \"less than\" symbol

  • Replaces (ampersand)gt; with >

  • Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding characters

  • Replaces characters that are represented in decimal format, (ampersand)#nnnn;, with the corresponding characters

LOWERCASE

Use this option to convert uppercase letters (A-Z) to lowercase (a-z).

URL_DECODE

Use this option to decode a URL-encoded value.

NONE

Specify NONE if you don't want to perform any text transformations.

" } }, - "documentation":"

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" + "documentation":"

This is AWS WAF Classic documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

Specifies the part of a web request that you want AWS WAF to inspect for cross-site scripting attacks and, if you want AWS WAF to inspect a header, the name of the header.

" }, "XssMatchTuples":{ "type":"list", @@ -5352,5 +5526,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

This is the AWS WAF Regional API Reference for using AWS WAF with Elastic Load Balancing (ELB) Application Load Balancers. The AWS WAF actions and data types listed in the reference are available for protecting Application Load Balancers. You can use these actions and data types by means of the endpoints listed in AWS Regions and Endpoints. This guide is for developers who need detailed information about the AWS WAF API actions, data types, and errors. For detailed information about AWS WAF features and an overview of how to use the AWS WAF API, see the AWS WAF Developer Guide.

" + "documentation":"

This is AWS WAF Classic Regional documentation. For more information, see AWS WAF Classic in the developer guide.

For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS WAF Developer Guide. With the latest version, AWS WAF has a single set of endpoints for regional and global use.

This is the AWS WAF Regional Classic API Reference for using AWS WAF Classic with the AWS resources, Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. The AWS WAF Classic actions and data types listed in the reference are available for protecting Elastic Load Balancing (ELB) Application Load Balancers and API Gateway APIs. You can use these actions and data types by means of the endpoints listed in AWS Regions and Endpoints. This guide is for developers who need detailed information about the AWS WAF Classic API actions, data types, and errors. For detailed information about AWS WAF Classic features and an overview of how to use the AWS WAF Classic API, see AWS WAF Classic in the developer guide.

" } diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index fa125e2fabad..eae11a53157a 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index 6b2dbf9d250a..24907c7aff6b 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -548,7 +548,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the TagInfoForResource for the specified resource.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Retrieves the TagInfoForResource for the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "ListWebACLs":{ "name":"ListWebACLs", @@ -579,9 +579,10 @@ {"shape":"WAFOptimisticLockException"}, {"shape":"WAFServiceLinkedRoleErrorException"}, {"shape":"WAFInvalidParameterException"}, - {"shape":"WAFInvalidOperationException"} + {"shape":"WAFInvalidOperationException"}, + {"shape":"WAFLimitsExceededException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the Region that you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Enables the specified LoggingConfiguration, to start logging from a web ACL, according to the configuration provided.

You can access information about all traffic that AWS WAF inspects using the following steps:

  1. Create an Amazon Kinesis Data Firehose.

    Create the data firehose with a PUT source and in the Region that you are operating. If you are capturing logs for Amazon CloudFront, always create the firehose in US East (N. Virginia).

    Give the data firehose a name that starts with the prefix aws-waf-logs-. For example, aws-waf-logs-us-east-2-analytics.

    Do not create the data firehose using a Kinesis stream as your source.

  2. Associate that firehose to your web ACL using a PutLoggingConfiguration request.

When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide.

" }, "PutPermissionPolicy":{ "name":"PutPermissionPolicy", @@ -616,7 +617,7 @@ {"shape":"WAFTagOperationInternalErrorException"}, {"shape":"WAFInvalidOperationException"} ], - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Associates tags with the specified AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each AWS resource.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Associates tags with the specified AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "UntagResource":{ "name":"UntagResource", @@ -1579,6 +1580,13 @@ "type":"list", "member":{"shape":"ExcludedRule"} }, + "FallbackBehavior":{ + "type":"string", + "enum":[ + "MATCH", + "NO_MATCH" + ] + }, "FieldToMatch":{ "type":"structure", "members":{ @@ -1658,12 +1666,48 @@ }, "documentation":"

The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement, but it can only contain a rule group reference.

" }, + "ForwardedIPConfig":{ + "type":"structure", + "required":[ + "HeaderName", + "FallbackBehavior" + ], + "members":{ + "HeaderName":{ + "shape":"ForwardedIPHeaderName", + "documentation":"

The name of the HTTP header to use for the IP address. For example, to use the X-Forwarded-For (XFF) header, set this to X-Forwarded-For.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" + }, + "FallbackBehavior":{ + "shape":"FallbackBehavior", + "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

  • MATCH - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

" + } + }, + "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

AWS WAF only evaluates the first IP address found in the specified HTTP header.

" + }, + "ForwardedIPHeaderName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9-]+$" + }, + "ForwardedIPPosition":{ + "type":"string", + "enum":[ + "FIRST", + "LAST", + "ANY" + ] + }, "GeoMatchStatement":{ "type":"structure", "members":{ "CountryCodes":{ "shape":"CountryCodes", "documentation":"

An array of two-character country codes, for example, [ \"US\", \"CN\" ], from the alpha-2 country ISO codes of the ISO 3166 international standard.

" + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" } }, "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to identify web requests based on country of origin.

" @@ -1875,7 +1919,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "MaxItems":{ "shape":"ListMaxItems", @@ -1896,7 +1940,7 @@ }, "TimeWindow":{ "shape":"TimeWindow", - "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.

" + "documentation":"

Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests. Times are in Coordinated Universal Time (UTC) format.

" } } }, @@ -2060,6 +2104,29 @@ }, "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Contains one or more IP addresses or blocks of IP addresses specified in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports any CIDR range. For information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.

AWS WAF assigns an ARN to each IPSet that you create. To use an IP set in a rule, you provide the ARN to the Rule statement IPSetReferenceStatement.

" }, + "IPSetForwardedIPConfig":{ + "type":"structure", + "required":[ + "HeaderName", + "FallbackBehavior", + "Position" + ], + "members":{ + "HeaderName":{ + "shape":"ForwardedIPHeaderName", + "documentation":"

The name of the HTTP header to use for the IP address. For example, to use the X-Forwarded-For (XFF) header, set this to X-Forwarded-For.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" + }, + "FallbackBehavior":{ + "shape":"FallbackBehavior", + "documentation":"

The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

You can specify the following fallback behaviors:

  • MATCH - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.

  • NO_MATCH - Treat the web request as not matching the rule statement.

" + }, + "Position":{ + "shape":"ForwardedIPPosition", + "documentation":"

The position in the header to search for the IP address. The header can contain IP addresses of the original client and also of proxies. For example, the header value could be 10.1.1.1, 127.0.0.0, 10.10.10.10 where the first IP address identifies the original client and the rest identify proxies that the request went through.

The options for this setting are the following:

  • FIRST - Inspect the first IP address in the list of IP addresses in the header. This is usually the client's original IP.

  • LAST - Inspect the last IP address in the list of IP addresses in the header.

  • ANY - Inspect all IP addresses in the header for a match. If the header contains more than 10 IP addresses, AWS WAF inspects the last 10.

" + } + }, + "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

This configuration is used only for IPSetReferenceStatement. For GeoMatchStatement and RateBasedStatement, use ForwardedIPConfig instead.

" + }, "IPSetReferenceStatement":{ "type":"structure", "required":["ARN"], @@ -2067,6 +2134,10 @@ "ARN":{ "shape":"ResourceArn", "documentation":"

The Amazon Resource Name (ARN) of the IPSet that this statement references.

" + }, + "IPSetForwardedIPConfig":{ + "shape":"IPSetForwardedIPConfig", + "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

" } }, "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet.

Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.

" @@ -2533,7 +2604,12 @@ "TAGS", "TAG_KEYS", "METRIC_NAME", - "FIREWALL_MANAGER_STATEMENT" + "FIREWALL_MANAGER_STATEMENT", + "FALLBACK_BEHAVIOR", + "POSITION", + "FORWARDED_IP_CONFIG", + "IP_SET_FORWARDED_IP_CONFIG", + "HEADER_NAME" ] }, "ParameterExceptionParameter":{ @@ -2611,22 +2687,29 @@ "members":{ "Limit":{ "shape":"RateLimit", - "documentation":"

The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopDownStatement, this limit is applied only to the requests that match the statement.

" + "documentation":"

The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement, this limit is applied only to the requests that match the statement.

" }, "AggregateKeyType":{ "shape":"RateBasedStatementAggregateKeyType", - "documentation":"

Setting that indicates how to aggregate the request counts. Currently, you must set this to IP. The request counts are aggregated on IP addresses.

" + "documentation":"

Setting that indicates how to aggregate the request counts. The options are the following:

  • IP - Aggregate the request counts on the IP address from the web request origin.

  • FORWARDED_IP - Aggregate the request counts on the first IP address in an HTTP header. If you use this, configure the ForwardedIPConfig, to specify the header to use.

" }, "ScopeDownStatement":{ "shape":"Statement", "documentation":"

An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.

" + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all.

This is required if AggregateKeyType is set to FORWARDED_IP.

" } }, "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

  • An IP match statement with an IP set that specifies the address 192.0.2.44.

  • A string match statement that searches in the User-Agent header for the string BadBot.

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" }, "RateBasedStatementAggregateKeyType":{ "type":"string", - "enum":["IP"] + "enum":[ + "IP", + "FORWARDED_IP" + ] }, "RateBasedStatementManagedKeysIPSet":{ "type":"structure", @@ -3125,7 +3208,7 @@ "documentation":"

Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each AWS resource.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

A tag associated with an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "TagInfoForResource":{ "type":"structure", @@ -3139,7 +3222,7 @@ "documentation":"

The array of Tag objects defined for the resource.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The collection of tagging definitions for an AWS resource.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

The collection of tagging definitions for an AWS resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing or other management. Typically, the tag key represents a category, such as \"environment\", and the tag value represents a specific value within that category, such as \"test,\" \"development,\" or \"production\". Or you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each AWS resource, up to 50 tags for a resource.

You can tag the AWS resources that you manage through AWS WAF: web ACLs, rule groups, IP sets, and regex pattern sets. You can't manage or view tags through the AWS WAF console.

" }, "TagKey":{ "type":"string", @@ -3232,14 +3315,14 @@ "members":{ "StartTime":{ "shape":"Timestamp", - "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" + "documentation":"

The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

" } }, - "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" + "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

In a GetSampledRequests request, the StartTime and EndTime objects specify the time range for which you want AWS WAF to return a sample of web requests.

You must specify the times in Coordinated Universal Time (UTC) format. UTC format includes the special designator, Z. For example, \"2016-09-27T14:50Z\". You can specify any time range in the previous three hours.

In a GetSampledRequests response, the StartTime and EndTime objects specify the time range for which AWS WAF actually returned a sample of web requests. AWS WAF gets the specified number of requests from among the first 5,000 requests that your AWS resource receives during the specified time period. If your resource receives more than 5,000 requests during that period, AWS WAF stops sampling after the 5,000th request. In that case, EndTime is the time that AWS WAF received the 5,000th request.

" }, "Timestamp":{"type":"timestamp"}, "URIString":{"type":"string"}, @@ -3488,7 +3571,7 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example \"All\" and \"Default_Action.\" You can't change a MetricName after you create a VisibilityConfig.

" + "documentation":"

A name of the CloudWatch metric. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names reserved for AWS WAF, for example \"All\" and \"Default_Action.\" You can't change a MetricName after you create a VisibilityConfig.

" } }, "documentation":"

This is the latest version of AWS WAF, named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide.

Defines and enables Amazon CloudWatch metrics and web request sample collection.

" diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 3161762ff064..d88157427910 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 655b343c3054..68a77c3fdfb9 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/worklink/src/main/resources/codegen-resources/service-2.json b/services/worklink/src/main/resources/codegen-resources/service-2.json index 6383c10581b5..b60eccaa6ebd 100644 --- a/services/worklink/src/main/resources/codegen-resources/service-2.json +++ b/services/worklink/src/main/resources/codegen-resources/service-2.json @@ -319,6 +319,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"}, {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], "documentation":"

Retrieves a list of domains associated to a specified fleet.

" @@ -339,6 +340,19 @@ ], "documentation":"

Retrieves a list of fleets for the current account and Region.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Retrieves a list of tags for the specified resource.

" + }, "ListWebsiteAuthorizationProviders":{ "name":"ListWebsiteAuthorizationProviders", "http":{ @@ -423,6 +437,32 @@ ], "documentation":"

Signs the user out from all of their devices. The user can sign in again if they have valid credentials.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Adds or overwrites one or more tags for the specified resource, such as a fleet. Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"} + ], + "documentation":"

Removes one or more tags from the specified resource.

" + }, "UpdateAuditStreamConfiguration":{ "name":"UpdateAuditStreamConfiguration", "http":{ @@ -622,7 +662,10 @@ } } }, - "AuditStreamArn":{"type":"string"}, + "AuditStreamArn":{ + "type":"string", + "pattern":"^arn:aws:kinesis:.+:[0-9]{12}:stream/AmazonWorkLink-.*$" + }, "AuthorizationProviderType":{ "type":"string", "enum":["SAML"] @@ -660,6 +703,10 @@ "OptimizeForEndUserLocation":{ "shape":"Boolean", "documentation":"

The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to add to the resource. A tag is a key-value pair.

" } } }, @@ -668,7 +715,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" } } }, @@ -859,7 +906,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" } } }, @@ -893,6 +940,10 @@ "FleetStatus":{ "shape":"FleetStatus", "documentation":"

The current state of the fleet.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" } } }, @@ -1155,7 +1206,7 @@ "members":{ "FleetArn":{ "shape":"FleetArn", - "documentation":"

The ARN of the fleet.

" + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

" }, "CreatedTime":{ "shape":"DateTime", @@ -1171,7 +1222,7 @@ }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

The name to display.

" + "documentation":"

The name of the fleet to display.

" }, "CompanyCode":{ "shape":"CompanyCode", @@ -1180,6 +1231,10 @@ "FleetStatus":{ "shape":"FleetStatus", "documentation":"

The status of the fleet.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" } }, "documentation":"

The summary of the fleet.

" @@ -1303,6 +1358,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags attached to the resource. A tag is a key-value pair.

" + } + } + }, "ListWebsiteAuthorizationProvidersRequest":{ "type":"structure", "required":["FleetArn"], @@ -1481,6 +1557,53 @@ "type":"list", "member":{"shape":"SubnetId"} }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

The tags to add to the resource. A tag is a key-value pair.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -1499,6 +1622,32 @@ "error":{"httpStatusCode":403}, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"FleetArn", + "documentation":"

The Amazon Resource Name (ARN) of the fleet.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateAuditStreamConfigurationRequest":{ "type":"structure", "required":["FleetArn"], @@ -1703,5 +1852,5 @@ "member":{"shape":"WebsiteCaSummary"} } }, - "documentation":"

Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices.

" + "documentation":"

Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS and Android phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices.

" } diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index 59389ffab348..d59b64aee7aa 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 workmail diff --git a/services/workmail/src/main/resources/codegen-resources/service-2.json b/services/workmail/src/main/resources/codegen-resources/service-2.json index 58289d38c4ef..3d10367a4d66 100644 --- a/services/workmail/src/main/resources/codegen-resources/service-2.json +++ b/services/workmail/src/main/resources/codegen-resources/service-2.json @@ -223,6 +223,22 @@ "documentation":"

Deletes the specified resource.

", "idempotent":true }, + "DeleteRetentionPolicy":{ + "name":"DeleteRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRetentionPolicyRequest"}, + "output":{"shape":"DeleteRetentionPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"} + ], + "documentation":"

Deletes the specified retention policy from the specified organization.

", + "idempotent":true + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -382,6 +398,23 @@ ], "documentation":"

Gets the effects of an organization's access control rules as they apply to a specified IPv4 address, access protocol action, or user ID.

" }, + "GetDefaultRetentionPolicy":{ + "name":"GetDefaultRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDefaultRetentionPolicyRequest"}, + "output":{"shape":"GetDefaultRetentionPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Gets the default retention policy details for the specified organization.

", + "idempotent":true + }, "GetMailboxDetails":{ "name":"GetMailboxDetails", "http":{ @@ -493,7 +526,7 @@ "errors":[ {"shape":"InvalidParameterException"} ], - "documentation":"

Returns summaries of the customer's non-deleted organizations.

", + "documentation":"

Returns summaries of the customer's organizations.

", "idempotent":true }, "ListResourceDelegates":{ @@ -594,6 +627,23 @@ "documentation":"

Sets permissions for a user, group, or resource. This replaces any pre-existing permissions.

", "idempotent":true }, + "PutRetentionPolicy":{ + "name":"PutRetentionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutRetentionPolicyRequest"}, + "output":{"shape":"PutRetentionPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Puts a retention policy to the specified organization.

", + "idempotent":true + }, "RegisterToWorkMail":{ "name":"RegisterToWorkMail", "http":{ @@ -1051,7 +1101,10 @@ }, "DeleteAccessControlRuleRequest":{ "type":"structure", - "required":["Name"], + "required":[ + "OrganizationId", + "Name" + ], "members":{ "OrganizationId":{ "shape":"OrganizationId", @@ -1166,6 +1219,28 @@ "members":{ } }, + "DeleteRetentionPolicyRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "Id" + ], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + }, + "Id":{ + "shape":"ShortString", + "documentation":"

The retention policy ID.

" + } + } + }, + "DeleteRetentionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteUserRequest":{ "type":"structure", "required":[ @@ -1531,6 +1606,42 @@ "documentation":"

You are performing an operation on a user, group, or resource that isn't in the expected state, such as trying to delete an active user.

", "exception":true }, + "FolderConfiguration":{ + "type":"structure", + "required":[ + "Name", + "Action" + ], + "members":{ + "Name":{ + "shape":"FolderName", + "documentation":"

The folder name.

" + }, + "Action":{ + "shape":"RetentionAction", + "documentation":"

The action to take on the folder contents at the end of the folder configuration period.

" + }, + "Period":{ + "shape":"RetentionPeriod", + "documentation":"

The period of time at which the folder configuration action is applied.

" + } + }, + "documentation":"

The configuration applied to an organization's folders by its retention policy.

" + }, + "FolderConfigurations":{ + "type":"list", + "member":{"shape":"FolderConfiguration"} + }, + "FolderName":{ + "type":"string", + "enum":[ + "INBOX", + "DELETED_ITEMS", + "SENT_ITEMS", + "DRAFTS", + "JUNK_EMAIL" + ] + }, "GetAccessControlEffectRequest":{ "type":"structure", "required":[ @@ -1571,6 +1682,37 @@ } } }, + "GetDefaultRetentionPolicyRequest":{ + "type":"structure", + "required":["OrganizationId"], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + } + } + }, + "GetDefaultRetentionPolicyResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ShortString", + "documentation":"

The retention policy ID.

" + }, + "Name":{ + "shape":"ShortString", + "documentation":"

The retention policy name.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The retention policy description.

" + }, + "FolderConfigurations":{ + "shape":"FolderConfigurations", + "documentation":"

The retention policy folder configurations.

" + } + } + }, "GetMailboxDetailsRequest":{ "type":"structure", "required":[ @@ -2182,6 +2324,11 @@ "type":"list", "member":{"shape":"Permission"} }, + "PolicyDescription":{ + "type":"string", + "max":256, + "pattern":"[\\w\\d\\s\\S\\-!?=,.;:'_]+" + }, "PutAccessControlRuleRequest":{ "type":"structure", "required":[ @@ -2270,6 +2417,41 @@ "members":{ } }, + "PutRetentionPolicyRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "Name", + "FolderConfigurations" + ], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The organization ID.

" + }, + "Id":{ + "shape":"ShortString", + "documentation":"

The retention policy ID.

" + }, + "Name":{ + "shape":"ShortString", + "documentation":"

The retention policy name.

" + }, + "Description":{ + "shape":"PolicyDescription", + "documentation":"

The retention policy description.

" + }, + "FolderConfigurations":{ + "shape":"FolderConfigurations", + "documentation":"

The retention policy folder configurations.

" + } + } + }, + "PutRetentionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "RegisterToWorkMailRequest":{ "type":"structure", "required":[ @@ -2399,6 +2581,26 @@ "type":"list", "member":{"shape":"Resource"} }, + "RetentionAction":{ + "type":"string", + "enum":[ + "NONE", + "DELETE", + "PERMANENTLY_DELETE" + ] + }, + "RetentionPeriod":{ + "type":"integer", + "box":true, + "max":730, + "min":1 + }, + "ShortString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "String":{ "type":"string", "max":256 diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index bad7f4744848..a2743fd817bc 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index fd14c97de16c..9af8f1db1624 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 178acf05a60b..c2865bebc7ac 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 96fec9a1748d..53c2c2f68dc9 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml @@ -75,6 +75,11 @@ http-client-spi ${awsjavasdk.version}
+ + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk sdk-core diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json new file mode 100644 index 000000000000..3ba86efbbb2d --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/endpointdiscovery/service-2.json @@ -0,0 +1,135 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-08-31", + "endpointPrefix":"awsendpointdiscoverytestservice", + "jsonVersion":"1.1", + "protocol":"json", + "serviceAbbreviation":"AwsEndpointDiscoveryTest", + "serviceFullName":"AwsEndpointDiscoveryTest", + "serviceId":"AwsEndpointDiscoveryTest", + "signatureVersion":"v4", + "signingName":"awsendpointdiscoverytestservice", + "targetPrefix":"AwsEndpointDiscoveryTestService" + }, + "operations":{ + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/DescribeEndpoints" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true + }, + "TestDiscoveryIdentifiersRequired":{ + "name":"TestDiscoveryIdentifiersRequired", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestDiscoveryIdentifiersRequiredRequest"}, + "output":{"shape":"TestDiscoveryIdentifiersRequiredResponse"}, + "endpointdiscovery":{"required":true} + }, + "TestDiscoveryOptional":{ + "name":"TestDiscoveryOptional", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryOptional" + }, + "input":{"shape":"TestDiscoveryOptionalRequest"}, + "output":{"shape":"TestDiscoveryOptionalResponse"}, + "endpointdiscovery":{ + } + }, + "TestDiscoveryRequired":{ + "name":"TestDiscoveryRequired", + "http":{ + "method":"POST", + "requestUri":"/TestDiscoveryRequired" + }, + 
"input":{"shape":"TestDiscoveryRequiredRequest"}, + "output":{"shape":"TestDiscoveryRequiredResponse"}, + "endpointdiscovery":{"required":true} + } + }, + "shapes":{ + "Boolean":{"type":"boolean"}, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + "Operation":{"shape":"String"}, + "Identifiers":{"shape":"Identifiers"} + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, + "Identifiers":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "Long":{"type":"long"}, + "String":{"type":"string"}, + "TestDiscoveryIdentifiersRequiredRequest":{ + "type":"structure", + "required":["Sdk"], + "members":{ + "Sdk":{ + "shape":"String", + "endpointdiscoveryid":true + } + } + }, + "TestDiscoveryIdentifiersRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryOptionalRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryOptionalResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + }, + "TestDiscoveryRequiredRequest":{ + "type":"structure", + "members":{ + } + }, + "TestDiscoveryRequiredResponse":{ + "type":"structure", + "members":{ + "DiscoveredEndpoint":{"shape":"Boolean"} + } + } + } +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java new file mode 100644 index 000000000000..50fdd40116a7 --- /dev/null +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/EndpointDiscoveryTest.java @@ -0,0 +1,161 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.ExecutionException; +import org.assertj.core.api.AbstractThrowableAssert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.endpointdiscovery.EndpointDiscoveryFailedException; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestAsyncClient; +import 
software.amazon.awssdk.services.endpointdiscoverytest.EndpointDiscoveryTestClient; +import software.amazon.awssdk.services.endpointdiscoverytest.model.EndpointDiscoveryTestException; + +public class EndpointDiscoveryTest { + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private EndpointDiscoveryTestClient client; + + private EndpointDiscoveryTestAsyncClient asyncClient; + + @Before + public void setupClient() { + client = EndpointDiscoveryTestClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(true) + .build(); + + asyncClient = EndpointDiscoveryTestAsyncClient.builder() + .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid"))) + .region(Region.US_EAST_1) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .endpointDiscoveryEnabled(true) + .build(); + } + + @Test + public void syncRequiredOperation_EmptyEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubEmptyResponse(); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(IllegalArgumentException.class); + } + + @Test + public void asyncRequiredOperation_EmptyEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubEmptyResponse(); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(IllegalArgumentException.class); + } + + @Test + public void syncRequiredOperation_NonRetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(404); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class) + 
.hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void asyncRequiredOperation_NonRetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(404); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void syncRequiredOperation_RetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(500); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void asyncRequiredOperation_RetryableEndpointDiscoveryResponse_CausesEndpointDiscoveryFailedException() { + stubDescribeEndpointsResponse(500); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(EndpointDiscoveryFailedException.class) + .hasCauseInstanceOf(EndpointDiscoveryTestException.class); + } + + @Test + public void syncRequiredOperation_InvalidEndpointEndpointDiscoveryResponse_CausesSdkException() { + stubDescribeEndpointsResponse(200, "invalid", 15); + assertThatThrownBy(() -> client.testDiscoveryRequired(r -> {})) + .isInstanceOf(SdkClientException.class); + } + + @Test + public void asyncRequiredOperation_InvalidEndpointEndpointDiscoveryResponse_CausesSdkException() { + stubDescribeEndpointsResponse(200, "invalid", 15); + assertAsyncRequiredOperationCallThrowable() + .isInstanceOf(SdkClientException.class); + } + + private void stubEmptyResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(200) + .withBody("{}"))); + } + + private void stubDescribeEndpointsResponse(int status) { + stubDescribeEndpointsResponse(status, "localhost", 60); + } + + private void stubDescribeEndpointsResponse(int status, String address, long cachePeriodInMinutes) { + 
stubFor(post(urlPathEqualTo("/DescribeEndpoints")) + .willReturn(aResponse().withStatus(status) + .withBody("{" + + " \"Endpoints\": [{" + + " \"Address\": \"" + address + "\"," + + " \"CachePeriodInMinutes\": " + cachePeriodInMinutes + + " }]" + + "}"))); + } + + private AbstractThrowableAssert assertAsyncRequiredOperationCallThrowable() { + try { + asyncClient.testDiscoveryRequired(r -> {}).get(); + throw new AssertionError(); + } catch (InterruptedException e) { + return assertThat(e); + } catch (ExecutionException e) { + return assertThat(e.getCause()); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java new file mode 100644 index 000000000000..623f7cbc0386 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/InvalidRegionTest.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; + +public class InvalidRegionTest { + @Test + public void invalidClientRegionGivesHelpfulMessage() { + assertThatThrownBy(() -> ProtocolRestJsonClient.builder() + .region(Region.of("US_EAST_1")) + .credentialsProvider(AnonymousCredentialsProvider.create()) + .build()) + .isInstanceOf(SdkClientException.class) + .hasMessageContaining("US_EAST_1") + .hasMessageContaining("region"); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java new file mode 100644 index 000000000000..b70fc339db3c --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/ModelSerializationTest.java @@ -0,0 +1,119 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.assertj.core.api.Assertions.assertThat; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.module.SimpleModule; +import java.io.IOException; +import java.time.Instant; +import org.junit.Test; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.protocolrestjson.model.AllTypesRequest; +import software.amazon.awssdk.services.protocolrestjson.model.BaseType; +import software.amazon.awssdk.services.protocolrestjson.model.RecursiveStructType; +import software.amazon.awssdk.services.protocolrestjson.model.SimpleStruct; +import software.amazon.awssdk.services.protocolrestjson.model.StructWithNestedBlobType; +import software.amazon.awssdk.services.protocolrestjson.model.StructWithTimestamp; +import software.amazon.awssdk.services.protocolrestjson.model.SubTypeOne; + +/** + * Verify that modeled objects can be marshalled using Jackson. 
+ */ +public class ModelSerializationTest { + @Test + public void jacksonSerializationWorksForEmptyRequestObjects() throws IOException { + validateJacksonSerialization(AllTypesRequest.builder().build()); + } + + @Test + public void jacksonSerializationWorksForPopulatedRequestModels() throws IOException { + SdkBytes blob = SdkBytes.fromUtf8String("foo"); + + SimpleStruct simpleStruct = SimpleStruct.builder().stringMember("foo").build(); + StructWithTimestamp structWithTimestamp = StructWithTimestamp.builder().nestedTimestamp(Instant.EPOCH).build(); + StructWithNestedBlobType structWithNestedBlob = StructWithNestedBlobType.builder().nestedBlob(blob).build(); + RecursiveStructType recursiveStruct = RecursiveStructType.builder() + .recursiveStruct(RecursiveStructType.builder().build()) + .build(); + BaseType baseType = BaseType.builder().baseMember("foo").build(); + SubTypeOne subtypeOne = SubTypeOne.builder().subTypeOneMember("foo").build(); + + validateJacksonSerialization(AllTypesRequest.builder() + .stringMember("foo") + .integerMember(5) + .booleanMember(true) + .floatMember(5F) + .doubleMember(5D) + .longMember(5L) + .simpleList("foo", "bar") + .listOfMaps(singletonList(singletonMap("foo", "bar"))) + .listOfStructs(simpleStruct) + .mapOfStringToIntegerList(singletonMap("foo", singletonList(5))) + .mapOfStringToStruct(singletonMap("foo", simpleStruct)) + .timestampMember(Instant.EPOCH) + .structWithNestedTimestampMember(structWithTimestamp) + .blobArg(blob) + .structWithNestedBlob(structWithNestedBlob) + .blobMap(singletonMap("foo", blob)) + .listOfBlobs(blob, blob) + .recursiveStruct(recursiveStruct) + .polymorphicTypeWithSubTypes(baseType) + .polymorphicTypeWithoutSubTypes(subtypeOne) + .enumMember("foo") + .listOfEnumsWithStrings("foo", "bar") + .mapOfEnumToEnumWithStrings(singletonMap("foo", "bar")) + .build()); + } + + private void validateJacksonSerialization(AllTypesRequest original) throws IOException { + SimpleModule instantModule = new SimpleModule(); + 
instantModule.addSerializer(Instant.class, new InstantSerializer()); + instantModule.addDeserializer(Instant.class, new InstantDeserializer()); + + ObjectMapper mapper = new ObjectMapper(); + mapper.registerModule(instantModule); + + String serialized = mapper.writeValueAsString(original.toBuilder()); + AllTypesRequest deserialized = mapper.readValue(serialized, AllTypesRequest.serializableBuilderClass()).build(); + assertThat(deserialized).isEqualTo(original); + + } + + private class InstantSerializer extends JsonSerializer { + @Override + public void serialize(Instant t, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) + throws IOException { + jsonGenerator.writeString(t.toString()); + } + } + + private class InstantDeserializer extends JsonDeserializer { + @Override + public Instant deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) throws IOException { + return Instant.parse(jsonParser.getText()); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java new file mode 100644 index 000000000000..12541ba5e417 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -0,0 +1,247 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.time.Duration; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; + +@RunWith(MockitoJUnitRunner.class) +public class CoreMetricsTest { + private static final String SERVICE_ID = "AmazonProtocolRestJson"; + private static final String REQUEST_ID = "req-id"; + private static final String EXTENDED_REQUEST_ID = "extended-id"; + private static final int MAX_RETRIES = 2; + + private static ProtocolRestJsonClient client; + + @Rule + public ExpectedException thrown = 
ExpectedException.none(); + + @Mock + private SdkHttpClient mockHttpClient; + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(mockCredentialsProvider) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + AbortableInputStream content = contentStream("{}"); + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .putHeader("x-amz-request-id", REQUEST_ID) + .putHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .content(content) + .build(); + + HttpExecuteResponse mockResponse = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return mockResponse; + }); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + if (client != null) { + client.close(); + } + client = null; + } + + @Test + public void testApiCall_noConfiguredPublisher_succeeds() { + ProtocolRestJsonClient noPublisher = ProtocolRestJsonClient.builder() + .credentialsProvider(mockCredentialsProvider) + .httpClient(mockHttpClient) + .build(); + + noPublisher.allTypes(); + } + + @Test + public void testApiCall_publisherOverriddenOnRequest_requestPublisherTakesPrecedence() { + MetricPublisher requestMetricPublisher = mock(MetricPublisher.class); + + 
client.allTypes(r -> r.overrideConfiguration(o -> o.addMetricPublisher(requestMetricPublisher))); + + verify(requestMetricPublisher).publish(any(MetricCollection.class)); + verifyZeroInteractions(mockPublisher); + } + + @Test + public void testApiCall_operationSuccessful_addsMetrics() { + client.allTypes(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + assertThat(capturedCollection.name()).isEqualTo("ApiCall"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ID)) + .containsExactly(SERVICE_ID); + assertThat(capturedCollection.metricValues(CoreMetric.OPERATION_NAME)) + .containsExactly("AllTypes"); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) + .isGreaterThan(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.CREDENTIALS_FETCH_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + + assertThat(capturedCollection.children()).hasSize(1); + MetricCollection attemptCollection = capturedCollection.children().get(0); + + assertThat(attemptCollection.name()).isEqualTo("ApiCallAttempt"); + assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION)) + .containsExactly(Duration.ZERO); + assertThat(attemptCollection.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(200); + assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_REQUEST_ID)) + 
.containsExactly(REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ofMillis(100)); + assertThat(attemptCollection.metricValues(CoreMetric.UNMARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + @Test + public void testApiCall_serviceReturnsError_errorInfoIncludedInMetrics() throws IOException { + AbortableInputStream content = contentStream("{}"); + + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(500) + .putHeader("x-amz-request-id", REQUEST_ID) + .putHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .putHeader("X-Amzn-Errortype", "EmptyModeledException") + .content(content) + .build(); + + HttpExecuteResponse response = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenReturn(response); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + thrown.expect(EmptyModeledException.class); + try { + client.allTypes(); + } finally { + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(MAX_RETRIES); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(false); + + for (MetricCollection requestMetrics : capturedCollection.children()) { + // A service exception is still a successful HTTP execution so + // we should still have HTTP metrics as well. 
+ assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(500); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION)).hasOnlyOneElementSatisfying(d -> { + assertThat(d).isGreaterThanOrEqualTo(Duration.ZERO); + }); + assertThat(requestMetrics.metricValues(CoreMetric.UNMARSHALLING_DURATION)).hasOnlyOneElementSatisfying(d -> { + assertThat(d).isGreaterThanOrEqualTo(Duration.ZERO); + }); + } + } + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { + HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); + when(mockResponse.httpResponse()).thenReturn(httpResponse); + when(mockResponse.responseBody()).thenReturn(httpResponse.content()); + return mockResponse; + } + + private static AbortableInputStream contentStream(String content) { + ByteArrayInputStream baos = new ByteArrayInputStream(content.getBytes()); + return AbortableInputStream.create(baos); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java new file mode 100644 index 000000000000..9006ec7d6c44 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/SyncClientMetricPublisherResolutionTest.java @@ -0,0 +1,176 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.Arrays; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.http.ExecutableHttpRequest; +import software.amazon.awssdk.http.HttpExecuteRequest; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClientBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class SyncClientMetricPublisherResolutionTest { + + @Mock + private SdkHttpClient mockHttpClient; + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + private ProtocolRestJsonClient client; + + @After + public void teardown() { + if (client != null) { + client.close(); + 
} + + client = null; + } + + @Test + public void testApiCall_noPublishersSet_noException() throws IOException { + client = clientWithPublishers(); + client.allTypes(); + } + + @Test + public void testApiCall_publishersSetOnClient_clientPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(publisher1, publisher2); + + try { + client.allTypes(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnRequest_requestPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(publisher1).addMetricPublisher(publisher2))); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnClientAndRequest_requestPublishersInvoked() throws IOException { + MetricPublisher clientPublisher1 = mock(MetricPublisher.class); + MetricPublisher clientPublisher2 = mock(MetricPublisher.class); + + MetricPublisher requestPublisher1 = mock(MetricPublisher.class); + MetricPublisher requestPublisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(clientPublisher1, clientPublisher2); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(requestPublisher1).addMetricPublisher(requestPublisher2))); + } catch (Throwable t) { + // ignored, call fails 
because our mock HTTP client isn't set up + } finally { + verify(requestPublisher1).publish(any(MetricCollection.class)); + verify(requestPublisher2).publish(any(MetricCollection.class)); + verifyZeroInteractions(clientPublisher1); + verifyZeroInteractions(clientPublisher2); + } + } + + private ProtocolRestJsonClient clientWithPublishers(MetricPublisher... metricPublishers) throws IOException { + ProtocolRestJsonClientBuilder builder = ProtocolRestJsonClient.builder() + .httpClient(mockHttpClient) + .credentialsProvider(mockCredentialsProvider); + + AbortableInputStream content = AbortableInputStream.create(new ByteArrayInputStream("{}".getBytes())); + SdkHttpFullResponse httpResponse = SdkHttpFullResponse.builder() + .statusCode(200) + .content(content) + .build(); + + HttpExecuteResponse mockResponse = mockExecuteResponse(httpResponse); + + ExecutableHttpRequest mockExecuteRequest = mock(ExecutableHttpRequest.class); + when(mockExecuteRequest.call()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return mockResponse; + }); + + when(mockHttpClient.prepareRequest(any(HttpExecuteRequest.class))) + .thenReturn(mockExecuteRequest); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + + if (metricPublishers != null) { + builder.overrideConfiguration(o -> o.metricPublishers(Arrays.asList(metricPublishers))); + } + + return builder.build(); + } + + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { + HttpExecuteResponse mockResponse = mock(HttpExecuteResponse.class); + when(mockResponse.httpResponse()).thenReturn(httpResponse); + when(mockResponse.responseBody()).thenReturn(httpResponse.content()); + return mockResponse; + } +} diff --git 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java new file mode 100644 index 000000000000..64ea187f46f8 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncClientMetricPublisherResolutionTest.java @@ -0,0 +1,160 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.model.ProtocolRestJsonException; + +@RunWith(MockitoJUnitRunner.class) +public class AsyncClientMetricPublisherResolutionTest { + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + private ProtocolRestJsonAsyncClient client; + + + @Before + public void setup() { + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + 
@Test + public void testApiCall_noPublishersSet_noNpe() { + client = clientWithPublishers(); + // This is thrown because all the requests to our wiremock are + // nonsense, it's just important that we don't get NPE because we + // don't have publishers set + thrown.expectCause(instanceOf(ProtocolRestJsonException.class)); + client.allTypes().join(); + } + + @Test + public void testApiCall_publishersSetOnClient_clientPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(publisher1, publisher2); + + try { + client.allTypes().join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnRequest_requestPublishersInvoked() throws IOException { + MetricPublisher publisher1 = mock(MetricPublisher.class); + MetricPublisher publisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(publisher1).addMetricPublisher(publisher2))) + .join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(publisher1).publish(any(MetricCollection.class)); + verify(publisher2).publish(any(MetricCollection.class)); + } + } + + @Test + public void testApiCall_publishersSetOnClientAndRequest_requestPublishersInvoked() throws IOException { + MetricPublisher clientPublisher1 = mock(MetricPublisher.class); + MetricPublisher clientPublisher2 = mock(MetricPublisher.class); + + MetricPublisher requestPublisher1 = mock(MetricPublisher.class); + MetricPublisher requestPublisher2 = mock(MetricPublisher.class); + + client = clientWithPublishers(clientPublisher1, 
clientPublisher2); + + try { + client.allTypes(r -> r.overrideConfiguration(o -> + o.addMetricPublisher(requestPublisher1).addMetricPublisher(requestPublisher2))) + .join(); + } catch (Throwable t) { + // ignored, call fails because our mock HTTP client isn't set up + } finally { + verify(requestPublisher1).publish(any(MetricCollection.class)); + verify(requestPublisher2).publish(any(MetricCollection.class)); + verifyZeroInteractions(clientPublisher1); + verifyZeroInteractions(clientPublisher2); + } + } + + private ProtocolRestJsonAsyncClient clientWithPublishers(MetricPublisher... metricPublishers) { + ProtocolRestJsonAsyncClientBuilder builder = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())); + + if (metricPublishers != null) { + builder.overrideConfiguration(o -> o.metricPublishers(Arrays.asList(metricPublishers))); + } + + return builder.build(); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java new file mode 100644 index 000000000000..1a0852da47af --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncCoreMetricsTest.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; + +/** + * Core metrics test for async non-streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncCoreMetricsTest extends BaseAsyncCoreMetricsTest { + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private ProtocolRestJsonAsyncClient client; + + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } 
catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "AllTypes"; + } + + @Override + Supplier> callable() { + return () -> client.allTypes(); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } + + @Test + public void apiCall_noConfiguredPublisher_succeeds() { + stubSuccessfulResponse(); + ProtocolRestJsonAsyncClient noPublisher = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .build(); + + noPublisher.allTypes().join(); + } + + @Test + public void apiCall_publisherOverriddenOnRequest_requestPublisherTakesPrecedence() { + stubSuccessfulResponse(); + MetricPublisher requestMetricPublisher = mock(MetricPublisher.class); + + client.allTypes(r -> r.overrideConfiguration(o -> o.addMetricPublisher(requestMetricPublisher))).join(); + + verify(requestMetricPublisher).publish(any(MetricCollection.class)); + verifyZeroInteractions(mockPublisher); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java new file mode 100644 index 000000000000..8641e9db4e39 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncEventStreamingCoreMetricsTest.java @@ -0,0 +1,102 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.EmptyPublisher; +import software.amazon.awssdk.core.signer.NoOpSigner; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.EventStreamOperationRequest; +import software.amazon.awssdk.services.protocolrestjson.model.EventStreamOperationResponseHandler; + +/** + * Core metrics test for async streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncEventStreamingCoreMetricsTest extends BaseAsyncCoreMetricsTest { + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + + private ProtocolRestJsonAsyncClient client; + + @Before + public void setup() { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + 
.endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher) + .retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", "bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "EventStreamOperation"; + } + + @Override + Supplier> callable() { + return () -> client.eventStreamOperation(EventStreamOperationRequest.builder().overrideConfiguration(b -> b.signer(new NoOpSigner())).build(), + new EmptyPublisher<>(), + EventStreamOperationResponseHandler.builder() + .subscriber(b -> {}) + .build()); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java new file mode 100644 index 000000000000..5b6b148046f1 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/AsyncStreamingCoreMetricsTest.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.metrics.async; + +import static org.mockito.Mockito.when; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import java.io.IOException; +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.StreamingInputOperationRequest; + +/** + * Core metrics test for async streaming API + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncStreamingCoreMetricsTest extends BaseAsyncCoreMetricsTest { + + @Mock + private AwsCredentialsProvider mockCredentialsProvider; + + @Mock + private MetricPublisher mockPublisher; + + @Rule + public WireMockRule wireMock = new WireMockRule(0); + + private ProtocolRestJsonAsyncClient client; + + @Before + public void setup() throws IOException { + client = ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(mockCredentialsProvider) + .endpointOverride(URI.create("http://localhost:" + wireMock.port())) + .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .build(); + + when(mockCredentialsProvider.resolveCredentials()).thenAnswer(invocation -> { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + return AwsBasicCredentials.create("foo", 
"bar"); + }); + } + + @After + public void teardown() { + wireMock.resetAll(); + if (client != null) { + client.close(); + } + client = null; + } + + @Override + String operationName() { + return "StreamingInputOperation"; + } + + @Override + Supplier> callable() { + return () -> client.streamingInputOperation(StreamingInputOperationRequest.builder().build(), + AsyncRequestBody.fromBytes("helloworld".getBytes())); + } + + @Override + MetricPublisher publisher() { + return mockPublisher; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java new file mode 100644 index 000000000000..386e3cc4860d --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -0,0 +1,254 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.metrics.async; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl; +import static com.github.tomakehurst.wiremock.client.WireMock.post; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.verify; + +import com.github.tomakehurst.wiremock.http.Fault; +import com.github.tomakehurst.wiremock.stubbing.Scenario; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.metrics.MetricCollection; +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; + +@RunWith(MockitoJUnitRunner.class) +public abstract class BaseAsyncCoreMetricsTest { + private static final String SERVICE_ID = "AmazonProtocolRestJson"; + private static final String REQUEST_ID = "req-id"; + private static final String EXTENDED_REQUEST_ID = "extended-id"; + static final int MAX_RETRIES = 2; + public static final Duration FIXED_DELAY = Duration.ofMillis(500); + + @Test + public void apiCall_operationSuccessful_addsMetrics() { + stubSuccessfulResponse(); + callable().get().join(); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + MetricCollection capturedCollection = collectionCaptor.getValue(); 
+ + verifySuccessfulApiCallCollection(capturedCollection); + + assertThat(capturedCollection.children()).hasSize(1); + MetricCollection attemptCollection = capturedCollection.children().get(0); + + assertThat(attemptCollection.name()).isEqualTo("ApiCallAttempt"); + + verifySuccessfulApiCallAttemptCollection(attemptCollection); + assertThat(attemptCollection.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(FIXED_DELAY); + } + + @Test + public void apiCall_allRetryAttemptsFailedOf500() { + stubErrorResponse(); + assertThatThrownBy(() -> callable().get().join()).hasCauseInstanceOf(EmptyModeledException.class); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyFailedApiCallCollection(capturedCollection); + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + + capturedCollection.children().forEach(this::verifyFailedApiCallAttemptCollection); + } + + @Test + public void apiCall_allRetryAttemptsFailedOfNetworkError() { + stubNetworkError(); + assertThatThrownBy(() -> callable().get().join()).hasCauseInstanceOf(SdkClientException.class); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyFailedApiCallCollection(capturedCollection); + assertThat(capturedCollection.children()).hasSize(MAX_RETRIES + 1); + + capturedCollection.children().forEach(requestMetrics -> { + assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .isEmpty(); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .isEmpty(); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .isEmpty(); + 
assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(FIXED_DELAY); + }); + } + + @Test + public void apiCall_firstAttemptFailedRetrySucceeded() { + stubSuccessfulRetry(); + callable().get().join(); + addDelayIfNeeded(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(publisher()).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(1); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + + assertThat(capturedCollection.children()).hasSize(2); + + MetricCollection failedAttempt = capturedCollection.children().get(0); + verifyFailedApiCallAttemptCollection(failedAttempt); + + MetricCollection successfulAttempt = capturedCollection.children().get(1); + verifySuccessfulApiCallAttemptCollection(successfulAttempt); + } + + /** + * Adds delay after calling CompletableFuture.join to wait for publisher to get metrics. 
+ */ + void addDelayIfNeeded() { + try { + Thread.sleep(100); + } catch (InterruptedException ie) { + ie.printStackTrace(); + } + } + + abstract String operationName(); + + abstract Supplier> callable(); + + abstract MetricPublisher publisher(); + + private void verifyFailedApiCallAttemptCollection(MetricCollection requestMetrics) { + assertThat(requestMetrics.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(500); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(requestMetrics.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(requestMetrics.metricValues(CoreMetric.SERVICE_CALL_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + private void verifySuccessfulApiCallAttemptCollection(MetricCollection attemptCollection) { + assertThat(attemptCollection.metricValues(HttpMetric.HTTP_STATUS_CODE)) + .containsExactly(200); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_REQUEST_ID)) + .containsExactly(REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.AWS_EXTENDED_REQUEST_ID)) + .containsExactly(EXTENDED_REQUEST_ID); + assertThat(attemptCollection.metricValues(CoreMetric.BACKOFF_DELAY_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(attemptCollection.metricValues(CoreMetric.SIGNING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + } + + private void verifyFailedApiCallCollection(MetricCollection capturedCollection) { + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(MAX_RETRIES); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(false); + } + + private void verifySuccessfulApiCallCollection(MetricCollection 
capturedCollection) { + verifyApiCallCollection(capturedCollection); + assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_SUCCESSFUL)).containsExactly(true); + } + + private void verifyApiCallCollection(MetricCollection capturedCollection) { + assertThat(capturedCollection.name()).isEqualTo("ApiCall"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ID)) + .containsExactly(SERVICE_ID); + assertThat(capturedCollection.metricValues(CoreMetric.OPERATION_NAME)) + .containsExactly(operationName()); + assertThat(capturedCollection.metricValues(CoreMetric.CREDENTIALS_FETCH_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) + .isGreaterThanOrEqualTo(Duration.ZERO); + assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) + .isGreaterThan(FIXED_DELAY); + } + + void stubSuccessfulResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(200) + .withHeader("x-amz-request-id", REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withBody("{}"))); + } + + void stubErrorResponse() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withStatus(500) + .withHeader("x-amz-request-id", REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("X-Amzn-Errortype", "EmptyModeledException") + .withBody("{}"))); + } + + void stubNetworkError() { + stubFor(post(anyUrl()) + .willReturn(aResponse().withFault(Fault.CONNECTION_RESET_BY_PEER) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + )); + } + + void stubSuccessfulRetry() { + stubFor(post(anyUrl()) + .inScenario("retry at 500") + .whenScenarioStateIs(Scenario.STARTED) + .willSetStateTo("first attempt") + .willReturn(aResponse() + .withHeader("x-amz-request-id", 
REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withHeader("X-Amzn-Errortype", "EmptyModeledException") + .withStatus(500))); + + stubFor(post(anyUrl()) + .inScenario("retry at 500") + .whenScenarioStateIs("first attempt") + .willSetStateTo("second attempt") + .willReturn(aResponse() + .withStatus(200) + .withHeader("x-amz-request-id", REQUEST_ID) + .withHeader("x-amz-id-2", EXTENDED_REQUEST_ID) + .withFixedDelay((int) FIXED_DELAY.toMillis()) + .withBody("{}"))); + } +} diff --git a/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz new file mode 100644 index 000000000000..5e0eb47d6729 Binary files /dev/null and b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body.gz differ diff --git a/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz new file mode 100644 index 000000000000..6dfa2fe08732 Binary files /dev/null and b/test/codegen-generated-classes-test/src/test/resources/__files/compressed_json_body_with_extra_data.gz differ diff --git a/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties b/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties new file mode 100644 index 000000000000..4ee410e7fa92 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/resources/jetty-logging.properties @@ -0,0 +1,18 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +# Set up logging implementation +org.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.StdErrLog +org.eclipse.jetty.LEVEL=OFF diff --git a/test/codegen-generated-classes-test/src/test/resources/log4j.properties b/test/codegen-generated-classes-test/src/test/resources/log4j.properties new file mode 100644 index 000000000000..2f52be5df856 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/resources/log4j.properties @@ -0,0 +1,33 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+# + +log4j.rootLogger=ERROR, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +# Adjust to see more / less logging +#log4j.logger.com.amazonaws.ec2=DEBUG + +# HttpClient 3 Wire Logging +#log4j.logger.httpclient.wire=DEBUG + +# HttpClient 4 Wire Logging +# log4j.logger.org.apache.http.wire=INFO +# log4j.logger.org.apache.http=DEBUG +# log4j.logger.org.apache.http.wire=DEBUG +# log4j.logger.software.amazon.awssdk=DEBUG diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index d57835fbb86c..8eaf230be0af 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml http-client-tests @@ -73,5 +73,30 @@ wiremock compile + + io.reactivex.rxjava2 + rxjava + compile + + + io.netty + netty-codec-http + + + io.netty + netty-transport + + + io.netty + netty-common + + + io.netty + netty-buffer + + + io.netty + netty-handler +
diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/EmptyPublisher.java similarity index 97% rename from http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java rename to test/http-client-tests/src/main/java/software/amazon/awssdk/http/EmptyPublisher.java index 1f1308a2f07f..fa5728d8e6f5 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/EmptyPublisher.java +++ b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/EmptyPublisher.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.http.nio.netty; +package software.amazon.awssdk.http; import java.nio.ByteBuffer; import java.util.Optional; diff --git a/test/http-client-tests/src/main/java/software/amazon/awssdk/http/H1ServerBehaviorTestBase.java b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/H1ServerBehaviorTestBase.java new file mode 100644 index 000000000000..7f72233a3c01 --- /dev/null +++ b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/H1ServerBehaviorTestBase.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.http; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; +import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE; +import static io.netty.handler.codec.http.HttpHeaderValues.TEXT_PLAIN; +import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat; + +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.ServerSocketChannel; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.logging.LogLevel; +import io.netty.handler.logging.LoggingHandler; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.util.SelfSignedCertificate; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; + +/** + * Testing how clients react to various h1 server behaviors + */ +public abstract class H1ServerBehaviorTestBase { + private Server server; + + protected abstract 
SdkAsyncHttpClient getTestClient(); + + public void setup() throws Exception { + server = new Server(); + server.init(); + } + + public void teardown() throws InterruptedException { + if (server != null) { + server.shutdown(); + } + server = null; + } + + public void connectionReceiveServerErrorStatusShouldNotReuseConnection() { + server.return500OnFirstRequest = true; + server.closeConnection = false; + + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + assertThat(server.channels.size()).isEqualTo(2); + } + + public void connectionReceiveOkStatusShouldReuseConnection() { + server.return500OnFirstRequest = false; + server.closeConnection = false; + + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + assertThat(server.channels.size()).isEqualTo(1); + } + + public void connectionReceiveCloseHeaderShouldNotReuseConnection() throws InterruptedException { + server.return500OnFirstRequest = false; + server.closeConnection = true; + + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + Thread.sleep(1000); + + HttpTestUtils.sendGetRequest(server.port(), getTestClient()).join(); + assertThat(server.channels.size()).isEqualTo(2); + } + + private static class Server extends ChannelInitializer { + private static final byte[] CONTENT = "helloworld".getBytes(StandardCharsets.UTF_8); + private ServerBootstrap bootstrap; + private ServerSocketChannel serverSock; + private List channels = new ArrayList<>(); + private final NioEventLoopGroup group = new NioEventLoopGroup(); + private SslContext sslCtx; + private boolean return500OnFirstRequest; + private boolean closeConnection; + + public void init() throws Exception { + SelfSignedCertificate ssc = new SelfSignedCertificate(); + sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); + + bootstrap = new 
ServerBootstrap() + .channel(NioServerSocketChannel.class) + .handler(new LoggingHandler(LogLevel.DEBUG)) + .group(group) + .childHandler(this); + + serverSock = (ServerSocketChannel) bootstrap.bind(0).sync().channel(); + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + channels.add(ch); + ChannelPipeline pipeline = ch.pipeline(); + pipeline.addLast(sslCtx.newHandler(ch.alloc())); + pipeline.addLast(new HttpServerCodec()); + pipeline.addLast(new BehaviorTestChannelHandler()); + } + + public void shutdown() throws InterruptedException { + group.shutdownGracefully().await(); + } + + public int port() { + return serverSock.localAddress().getPort(); + } + + private class BehaviorTestChannelHandler extends ChannelDuplexHandler { + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (msg instanceof HttpRequest) { + HttpResponseStatus status; + if (ctx.channel().equals(channels.get(0)) && return500OnFirstRequest) { + status = INTERNAL_SERVER_ERROR; + } else { + status = OK; + } + + FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status, + Unpooled.wrappedBuffer(CONTENT)); + + response.headers() + .set(CONTENT_TYPE, TEXT_PLAIN) + .setInt(CONTENT_LENGTH, response.content().readableBytes()); + + if (closeConnection) { + response.headers().set(CONNECTION, CLOSE); + } + + ctx.writeAndFlush(response); + } + } + } + } +} diff --git a/test/http-client-tests/src/main/java/software/amazon/awssdk/http/HttpTestUtils.java b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/HttpTestUtils.java index e858a6bc145f..ed14c8b78423 100644 --- a/test/http-client-tests/src/main/java/software/amazon/awssdk/http/HttpTestUtils.java +++ b/test/http-client-tests/src/main/java/software/amazon/awssdk/http/HttpTestUtils.java @@ -18,9 +18,16 @@ import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; import com.github.tomakehurst.wiremock.WireMockServer; +import 
io.reactivex.Flowable; import java.io.InputStream; import java.net.URL; +import java.nio.ByteBuffer; import java.security.KeyStore; +import java.util.concurrent.CompletableFuture; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; public class HttpTestUtils { private HttpTestUtils() { @@ -46,4 +53,36 @@ public static KeyStore getSelfSignedKeyStore() throws Exception { return keyStore; } + + public static CompletableFuture sendGetRequest(int serverPort, SdkAsyncHttpClient client) { + AsyncExecuteRequest req = AsyncExecuteRequest.builder() + .responseHandler(new SdkAsyncHttpResponseHandler() { + private SdkHttpResponse headers; + + @Override + public void onHeaders(SdkHttpResponse headers) { + this.headers = headers; + } + + @Override + public void onStream(Publisher stream) { + Flowable.fromPublisher(stream).forEach(b -> { + }); + } + + @Override + public void onError(Throwable error) { + } + }) + .request(SdkHttpFullRequest.builder() + .method(SdkHttpMethod.GET) + .protocol("https") + .host("localhost") + .port(serverPort) + .build()) + .requestContentPublisher(new EmptyPublisher()) + .build(); + + return client.execute(req); + } } diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 8bdfb31331ba..d07054d574ec 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 3d9fbeb4012a..ceffc3ff6815 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml 
b/test/protocol-tests/pom.xml index 01c2f09d131d..9ffb02d8a98b 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml 4.0.0 @@ -83,6 +83,11 @@ http-client-spi ${awsjavasdk.version} + + software.amazon.awssdk + metrics-spi + ${awsjavasdk.version} + software.amazon.awssdk sdk-core diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java index 67e6a3b1bae6..da9c40cee1b7 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/AsyncResponseThreadingTest.java @@ -21,6 +21,7 @@ import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.verify; import static software.amazon.awssdk.core.client.config.SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR; @@ -62,7 +63,8 @@ public void completionWithNioThreadWorksCorrectly() { client.streamingOutputOperation(StreamingOutputOperationRequest.builder().build(), AsyncResponseTransformer.toBytes()).join(); - verify(mockExecutor).execute(any()); + // #1 reporting metrics, #2 completing response + verify(mockExecutor, atLeast(1)).execute(any()); byte[] arrayCopy = response.asByteArray(); assertThat(arrayCopy).containsExactly('t', 'e', 's', 't'); diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/AwsJsonExceptionTest.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/AwsJsonExceptionTest.java index 
a988c735bb1c..7797845a9168 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/AwsJsonExceptionTest.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/AwsJsonExceptionTest.java @@ -126,6 +126,29 @@ public void modeledException_HasExceptionMetadataSet() { assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolJsonRpc"); assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); + assertThat(e.statusCode()).isEqualTo(404); + } + } + + @Test + public void modeledException_HasExceptionMetadataIncludingExtendedRequestIdSet() { + stubFor(post(urlEqualTo(PATH)).willReturn( + aResponse() + .withStatus(404) + .withHeader("x-amzn-RequestId", "1234") + .withHeader("x-amz-id-2", "5678") + .withBody("{\"__type\": \"EmptyModeledException\", \"Message\": \"This is the service message\"}"))); + try { + client.allTypes(); + } catch (EmptyModeledException e) { + AwsErrorDetails awsErrorDetails = e.awsErrorDetails(); + assertThat(awsErrorDetails.errorCode()).isEqualTo("EmptyModeledException"); + assertThat(awsErrorDetails.errorMessage()).isEqualTo("This is the service message"); + assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolJsonRpc"); + assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); + assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isEqualTo("5678"); assertThat(e.statusCode()).isEqualTo(404); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/QueryExceptionTests.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/QueryExceptionTests.java index 9d3f75ebf8b0..1470d492041c 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/QueryExceptionTests.java +++ 
b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/QueryExceptionTests.java @@ -231,6 +231,7 @@ public void modeledException_HasExceptionMetadataSet() { assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolQuery"); assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); assertThat(e.statusCode()).isEqualTo(404); } } @@ -252,6 +253,7 @@ public void modeledException_RequestIDInXml_SetCorrectly() { client.allTypes(); } catch (EmptyModeledException e) { assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); } } @@ -265,6 +267,22 @@ public void requestIdInHeader_IsSetOnException() { client.allTypes(); } catch (ProtocolQueryException e) { assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); + } + } + + @Test + public void requestIdAndExtendedRequestIdInHeader_IsSetOnException() { + stubFor(post(urlEqualTo(PATH)).willReturn( + aResponse() + .withStatus(404) + .withHeader("x-amzn-RequestId", "1234") + .withHeader("x-amz-id-2", "5678"))); + try { + client.allTypes(); + } catch (ProtocolQueryException e) { + assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isEqualTo("5678"); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestJsonExceptionTests.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestJsonExceptionTests.java index 5a82e4ff0bd5..818bd1c4a8e6 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestJsonExceptionTests.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestJsonExceptionTests.java @@ -158,6 +158,29 @@ public void modeledException_HasExceptionMetadataSet() { assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolRestJson"); 
assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); + assertThat(e.statusCode()).isEqualTo(404); + } + } + + @Test + public void modeledException_HasExceptionMetadataIncludingExtendedRequestIdSet() { + stubFor(post(urlEqualTo(ALL_TYPES_PATH)).willReturn( + aResponse() + .withStatus(404) + .withHeader("x-amzn-RequestId", "1234") + .withHeader("x-amz-id-2", "5678") + .withBody("{\"__type\": \"EmptyModeledException\", \"Message\": \"This is the service message\"}"))); + try { + client.allTypes(); + } catch (EmptyModeledException e) { + AwsErrorDetails awsErrorDetails = e.awsErrorDetails(); + assertThat(awsErrorDetails.errorCode()).isEqualTo("EmptyModeledException"); + assertThat(awsErrorDetails.errorMessage()).isEqualTo("This is the service message"); + assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolRestJson"); + assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); + assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isEqualTo("5678"); assertThat(e.statusCode()).isEqualTo(404); } } diff --git a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestXmlExceptionTests.java b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestXmlExceptionTests.java index b4d53596fa3d..99dc7a2b97a9 100644 --- a/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestXmlExceptionTests.java +++ b/test/protocol-tests/src/test/java/software/amazon/awssdk/protocol/tests/exception/RestXmlExceptionTests.java @@ -155,6 +155,35 @@ public void modeledException_HasExceptionMetadataSet() { assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolRestXml"); assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isNull(); + assertThat(e.statusCode()).isEqualTo(404); + } + } + 
+ @Test + public void modeledException_HasExceptionMetadataIncludingExtendedRequestIdSet() { + String xml = "" + + " " + + " EmptyModeledException" + + " This is the service message" + + " " + + " 1234" + + ""; + stubFor(post(urlEqualTo(ALL_TYPES_PATH)).willReturn( + aResponse() + .withStatus(404) + .withHeader("x-amz-id-2", "5678") + .withBody(xml))); + try { + client.allTypes(); + } catch (EmptyModeledException e) { + AwsErrorDetails awsErrorDetails = e.awsErrorDetails(); + assertThat(awsErrorDetails.errorCode()).isEqualTo("EmptyModeledException"); + assertThat(awsErrorDetails.errorMessage()).isEqualTo("This is the service message"); + assertThat(awsErrorDetails.serviceName()).isEqualTo("ProtocolRestXml"); + assertThat(awsErrorDetails.sdkHttpResponse()).isNotNull(); + assertThat(e.requestId()).isEqualTo("1234"); + assertThat(e.extendedRequestId()).isEqualTo("5678"); assertThat(e.statusCode()).isEqualTo(404); } } diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index b7009d7935c8..6f135c9e42d6 100755 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml @@ -137,8 +137,7 @@ software.amazon.awssdk dynamodb-enhanced - - ${awsjavasdk.version}-PREVIEW + ${awsjavasdk.version} com.amazonaws @@ -204,7 +203,7 @@ software.amazon.awssdk.crt aws-crt - 0.5.1 + ${awscrt.version} compile diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientBenchmark.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientBenchmark.java index 36402b2f61e0..d3ee289e4292 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientBenchmark.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientBenchmark.java @@ -15,38 +15,20 
@@ package software.amazon.awssdk.benchmark.apicall.httpclient.async; -import static software.amazon.awssdk.benchmark.utils.BenchmarkConstant.CONCURRENT_CALLS; -import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.awaitCountdownLatchUninterruptibly; -import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.countDownUponCompletion; - -import java.util.Collection; -import java.util.concurrent.CountDownLatch; +import java.net.URI; import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Level; import org.openjdk.jmh.annotations.Measurement; import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OperationsPerInvocation; import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.Blackhole; import org.openjdk.jmh.profile.StackProfiler; -import org.openjdk.jmh.results.RunResult; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; -import software.amazon.awssdk.benchmark.apicall.httpclient.SdkHttpClientBenchmark; import software.amazon.awssdk.benchmark.utils.MockServer; -import software.amazon.awssdk.crt.io.EventLoopGroup; -import software.amazon.awssdk.crt.io.HostResolver; -import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; -import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; /** * Using aws-crt-client to test against local mock https server. 
@@ -56,66 +38,11 @@ @Measurement(iterations = 5, time = 10, timeUnit = TimeUnit.SECONDS) @Fork(2) // To reduce difference between each run @BenchmarkMode(Mode.Throughput) -public class AwsCrtClientBenchmark implements SdkHttpClientBenchmark { - - private MockServer mockServer; - private SdkAsyncHttpClient sdkHttpClient; - private ProtocolRestJsonAsyncClient client; - private EventLoopGroup eventLoopGroup; - private HostResolver hostResolver; - - @Setup(Level.Trial) - public void setup() throws Exception { - mockServer = new MockServer(); - mockServer.start(); - - int numThreads = Runtime.getRuntime().availableProcessors(); - eventLoopGroup = new EventLoopGroup(numThreads); - hostResolver = new HostResolver(eventLoopGroup); - - sdkHttpClient = AwsCrtAsyncHttpClient.builder() - .verifyPeer(false) - .eventLoopGroup(this.eventLoopGroup) - .hostResolver(this.hostResolver) - .build(); - - client = ProtocolRestJsonAsyncClient.builder() - .endpointOverride(mockServer.getHttpsUri()) - .httpClient(sdkHttpClient) - .build(); - - // Making sure the request actually succeeds - client.allTypes().join(); - } - - @TearDown(Level.Trial) - public void tearDown() throws Exception { - mockServer.stop(); - client.close(); - sdkHttpClient.close(); - hostResolver.close(); - eventLoopGroup.close(); - } - - @Override - @Benchmark - @OperationsPerInvocation(CONCURRENT_CALLS) - public void concurrentApiCall(Blackhole blackhole) { - CountDownLatch countDownLatch = new CountDownLatch(CONCURRENT_CALLS); - for (int i = 0; i < CONCURRENT_CALLS; i++) { - countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); - } - - awaitCountdownLatchUninterruptibly(countDownLatch, 10, TimeUnit.SECONDS); - - } +public class AwsCrtClientBenchmark extends BaseCrtBenchmark { @Override - @Benchmark - public void sequentialApiCall(Blackhole blackhole) { - CountDownLatch countDownLatch = new CountDownLatch(1); - countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); - 
awaitCountdownLatchUninterruptibly(countDownLatch, 1, TimeUnit.SECONDS); + protected URI getEndpointOverride(MockServer mock) { + return mock.getHttpsUri(); } public static void main(String... args) throws Exception { @@ -123,6 +50,6 @@ public static void main(String... args) throws Exception { .include(AwsCrtClientBenchmark.class.getSimpleName()) .addProfiler(StackProfiler.class) .build(); - Collection run = new Runner(opt).run(); + new Runner(opt).run(); } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientNonTlsBenchmark.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientNonTlsBenchmark.java index 87c8c4730848..7fdd4af3f9e2 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientNonTlsBenchmark.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/AwsCrtClientNonTlsBenchmark.java @@ -15,38 +15,20 @@ package software.amazon.awssdk.benchmark.apicall.httpclient.async; -import static software.amazon.awssdk.benchmark.utils.BenchmarkConstant.CONCURRENT_CALLS; -import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.awaitCountdownLatchUninterruptibly; -import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.countDownUponCompletion; - -import java.util.Collection; -import java.util.concurrent.CountDownLatch; +import java.net.URI; import java.util.concurrent.TimeUnit; -import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Level; import org.openjdk.jmh.annotations.Measurement; import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OperationsPerInvocation; import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; import 
org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.Blackhole; import org.openjdk.jmh.profile.StackProfiler; -import org.openjdk.jmh.results.RunResult; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; -import software.amazon.awssdk.benchmark.apicall.httpclient.SdkHttpClientBenchmark; import software.amazon.awssdk.benchmark.utils.MockServer; -import software.amazon.awssdk.crt.io.EventLoopGroup; -import software.amazon.awssdk.crt.io.HostResolver; -import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; -import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; /** * Using aws-crt-client to test against local mock https server. @@ -56,66 +38,11 @@ @Measurement(iterations = 5, time = 10, timeUnit = TimeUnit.SECONDS) @Fork(2) // To reduce difference between each run @BenchmarkMode(Mode.Throughput) -public class AwsCrtClientNonTlsBenchmark implements SdkHttpClientBenchmark { - - private MockServer mockServer; - private SdkAsyncHttpClient sdkHttpClient; - private ProtocolRestJsonAsyncClient client; - private EventLoopGroup eventLoopGroup; - private HostResolver hostResolver; - - @Setup(Level.Trial) - public void setup() throws Exception { - mockServer = new MockServer(); - mockServer.start(); - - int numThreads = Runtime.getRuntime().availableProcessors(); - eventLoopGroup = new EventLoopGroup(numThreads); - hostResolver = new HostResolver(eventLoopGroup); - - sdkHttpClient = AwsCrtAsyncHttpClient.builder() - .verifyPeer(false) - .eventLoopGroup(eventLoopGroup) - .hostResolver(hostResolver) - .build(); - - client = ProtocolRestJsonAsyncClient.builder() - .endpointOverride(mockServer.getHttpUri()) - .httpClient(sdkHttpClient) - .build(); - - // Making sure the request actually succeeds - 
client.allTypes().join(); - } - - @TearDown(Level.Trial) - public void tearDown() throws Exception { - mockServer.stop(); - client.close(); - sdkHttpClient.close(); - hostResolver.close(); - eventLoopGroup.close(); - } - - @Override - @Benchmark - @OperationsPerInvocation(CONCURRENT_CALLS) - public void concurrentApiCall(Blackhole blackhole) { - CountDownLatch countDownLatch = new CountDownLatch(CONCURRENT_CALLS); - for (int i = 0; i < CONCURRENT_CALLS; i++) { - countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); - } - - awaitCountdownLatchUninterruptibly(countDownLatch, 10, TimeUnit.SECONDS); - - } +public class AwsCrtClientNonTlsBenchmark extends BaseCrtBenchmark { @Override - @Benchmark - public void sequentialApiCall(Blackhole blackhole) { - CountDownLatch countDownLatch = new CountDownLatch(1); - countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); - awaitCountdownLatchUninterruptibly(countDownLatch, 1, TimeUnit.SECONDS); + protected URI getEndpointOverride(MockServer mock) { + return mock.getHttpUri(); } public static void main(String... args) throws Exception { @@ -123,6 +50,6 @@ public static void main(String... args) throws Exception { .include(AwsCrtClientNonTlsBenchmark.class.getSimpleName()) .addProfiler(StackProfiler.class) .build(); - Collection run = new Runner(opt).run(); + new Runner(opt).run(); } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/BaseCrtBenchmark.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/BaseCrtBenchmark.java new file mode 100644 index 000000000000..94369a47728f --- /dev/null +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient/async/BaseCrtBenchmark.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.benchmark.apicall.httpclient.async; + +import static software.amazon.awssdk.benchmark.utils.BenchmarkConstant.CONCURRENT_CALLS; +import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.awaitCountdownLatchUninterruptibly; +import static software.amazon.awssdk.benchmark.utils.BenchmarkUtils.countDownUponCompletion; + +import java.net.URI; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.OperationsPerInvocation; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.infra.Blackhole; +import software.amazon.awssdk.benchmark.apicall.httpclient.SdkHttpClientBenchmark; +import software.amazon.awssdk.benchmark.utils.MockServer; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.SdkHttpConfigurationOption; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.utils.AttributeMap; + +/** + * Shared code between http and https benchmarks + */ +public abstract class BaseCrtBenchmark implements SdkHttpClientBenchmark { + + private MockServer mockServer; + private SdkAsyncHttpClient sdkHttpClient; + private 
ProtocolRestJsonAsyncClient client; + private EventLoopGroup eventLoopGroup; + private HostResolver hostResolver; + + @Setup(Level.Trial) + public void setup() throws Exception { + mockServer = new MockServer(); + mockServer.start(); + + int numThreads = Runtime.getRuntime().availableProcessors(); + eventLoopGroup = new EventLoopGroup(numThreads); + hostResolver = new HostResolver(eventLoopGroup); + + AttributeMap trustAllCerts = AttributeMap.builder() + .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, Boolean.TRUE) + .build(); + + sdkHttpClient = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(this.eventLoopGroup) + .hostResolver(this.hostResolver) + .buildWithDefaults(trustAllCerts); + + client = ProtocolRestJsonAsyncClient.builder() + .endpointOverride(getEndpointOverride(mockServer)) + .httpClient(sdkHttpClient) + .build(); + + // Making sure the request actually succeeds + client.allTypes().join(); + } + + @TearDown(Level.Trial) + public void tearDown() throws Exception { + mockServer.stop(); + client.close(); + sdkHttpClient.close(); + hostResolver.close(); + eventLoopGroup.close(); + } + + @Override + @Benchmark + @OperationsPerInvocation(CONCURRENT_CALLS) + public void concurrentApiCall(Blackhole blackhole) { + CountDownLatch countDownLatch = new CountDownLatch(CONCURRENT_CALLS); + for (int i = 0; i < CONCURRENT_CALLS; i++) { + countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); + } + + awaitCountdownLatchUninterruptibly(countDownLatch, 10, TimeUnit.SECONDS); + + } + + @Override + @Benchmark + public void sequentialApiCall(Blackhole blackhole) { + CountDownLatch countDownLatch = new CountDownLatch(1); + countDownUponCompletion(blackhole, client.allTypes(), countDownLatch); + awaitCountdownLatchUninterruptibly(countDownLatch, 1, TimeUnit.SECONDS); + } + + protected abstract URI getEndpointOverride(MockServer mock); +} diff --git 
a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2DefaultClientCreationBenchmark.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2DefaultClientCreationBenchmark.java index f966642a23ce..74c3a5eecda2 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2DefaultClientCreationBenchmark.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2DefaultClientCreationBenchmark.java @@ -57,6 +57,7 @@ public class V2DefaultClientCreationBenchmark implements SdkClientCreationBenchm @Benchmark public void createClient(Blackhole blackhole) throws Exception { client = DynamoDbClient.builder() + .endpointDiscoveryEnabled(false) .httpClient(ApacheHttpClient.builder().build()).build(); blackhole.consume(client); } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2OptimizedClientCreationBenchmark.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2OptimizedClientCreationBenchmark.java index b22a7c39119e..c7826d64d821 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2OptimizedClientCreationBenchmark.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/coldstart/V2OptimizedClientCreationBenchmark.java @@ -66,6 +66,7 @@ public void createClient(Blackhole blackhole) throws Exception { AwsBasicCredentials.create("test", "test"))) .httpClient(ApacheHttpClient.builder().build()) .overrideConfiguration(ClientOverrideConfiguration.builder().build()) + .endpointDiscoveryEnabled(false) .build(); blackhole.consume(client); diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index dbd9dc47f6ff..8ade17a3a0ec 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 
../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index b56373123c9d..aa36c295ed58 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml 4.0.0 @@ -65,6 +65,17 @@ ${awsjavasdk.version} test + + software.amazon.awssdk + aws-crt-client + ${awsjavasdk.version} + test + + + software.amazon.awssdk.crt + aws-crt + ${awscrt.version} + software.amazon.awssdk aws-core diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchAsyncStabilityTest.java deleted file mode 100644 index 826bef2e49fe..000000000000 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchAsyncStabilityTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.stability.tests.cloudwatch; - - -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.function.IntFunction; -import org.apache.commons.lang3.RandomUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; -import software.amazon.awssdk.stability.tests.utils.RetryableTest; -import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; - -public class CloudWatchAsyncStabilityTest extends CloudWatchBaseStabilityTest { - private static String namespace; - - @BeforeAll - public static void setup() { - namespace = "CloudWatchAsyncStabilityTest" + System.currentTimeMillis(); - } - - @AfterAll - public static void tearDown() { - cloudWatchAsyncClient.close(); - } - - @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) - public void putMetrics_lowTpsLongInterval() { - List metrics = new ArrayList<>(); - for (int i = 0; i < 20 ; i++) { - metrics.add(MetricDatum.builder() - .metricName("test") - .values(RandomUtils.nextDouble(1d, 1000d)) - .build()); - } - - IntFunction> futureIntFunction = i -> - cloudWatchAsyncClient.putMetricData(b -> b.namespace(namespace) - .metricData(metrics)); - - runCloudWatchTest("putMetrics_lowTpsLongInterval", futureIntFunction); - } - - - private void runCloudWatchTest(String testName, IntFunction> futureIntFunction) { - StabilityTestRunner.newRunner() - .testName("CloudWatchAsyncStabilityTest." 
+ testName) - .futureFactory(futureIntFunction) - .totalRuns(TOTAL_RUNS) - .requestCountPerRun(CONCURRENCY) - .delaysBetweenEachRun(Duration.ofSeconds(6)) - .run(); - } -} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchBaseStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchBaseStabilityTest.java index d2fbfe87d8eb..df9c2a088a75 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchBaseStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchBaseStabilityTest.java @@ -17,24 +17,51 @@ import java.time.Duration; -import software.amazon.awssdk.core.retry.RetryPolicy; -import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.IntFunction; + +import org.apache.commons.lang3.RandomUtils; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; import software.amazon.awssdk.testutils.service.AwsTestBase; public abstract class CloudWatchBaseStabilityTest extends AwsTestBase { protected static final int CONCURRENCY = 50; protected static final int TOTAL_RUNS = 3; - protected static CloudWatchAsyncClient cloudWatchAsyncClient = - CloudWatchAsyncClient.builder() - .httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(CONCURRENCY)) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(b -> b - // Retry at test level - .retryPolicy(RetryPolicy.none()) - .apiCallTimeout(Duration.ofMinutes(1))) - .build(); + protected abstract CloudWatchAsyncClient getTestClient(); + protected abstract String getNamespace(); + + 
protected void putMetrics() { + List metrics = new ArrayList<>(); + for (int i = 0; i < 20 ; i++) { + metrics.add(MetricDatum.builder() + .metricName("test") + .values(RandomUtils.nextDouble(1d, 1000d)) + .build()); + } + + IntFunction> futureIntFunction = i -> + getTestClient().putMetricData(b -> b.namespace(getNamespace()) + .metricData(metrics)); + + runCloudWatchTest("putMetrics_lowTpsLongInterval", futureIntFunction); + } + + + private void runCloudWatchTest(String testName, IntFunction> futureIntFunction) { + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + "." + testName) + .futureFactory(futureIntFunction) + .totalRuns(TOTAL_RUNS) + .requestCountPerRun(CONCURRENCY) + .delaysBetweenEachRun(Duration.ofSeconds(6)) + .run(); + } + } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java new file mode 100644 index 000000000000..f6986450020c --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchCrtAsyncStabilityTest.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.stability.tests.cloudwatch; + + +import java.time.Duration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +public class CloudWatchCrtAsyncStabilityTest extends CloudWatchBaseStabilityTest { + private static String namespace; + private static CloudWatchAsyncClient cloudWatchAsyncClient; + + @Override + protected CloudWatchAsyncClient getTestClient() { return cloudWatchAsyncClient; } + + @Override + protected String getNamespace() { return namespace; } + + @BeforeAll + public static void setup() { + namespace = "CloudWatchCrtAsyncStabilityTest" + System.currentTimeMillis(); + + int numThreads = Runtime.getRuntime().availableProcessors(); + try (EventLoopGroup eventLoopGroup = new EventLoopGroup(numThreads); + HostResolver hostResolver = new HostResolver(eventLoopGroup)) { + + SdkAsyncHttpClient.Builder crtClientBuilder = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(eventLoopGroup) + .hostResolver(hostResolver) + .connectionMaxIdleTime(Duration.ofSeconds(5)); + + cloudWatchAsyncClient = CloudWatchAsyncClient.builder() + .httpClientBuilder(crtClientBuilder) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) + // Retry at test level + .retryPolicy(RetryPolicy.none())) + .build(); + } + } + + @AfterAll + public static void tearDown() { + cloudWatchAsyncClient.close(); + } + + @RetryableTest(maxRetries = 3, 
retryableException = StabilityTestsRetryableException.class) + public void putMetrics_lowTpsLongInterval() { + putMetrics(); + } +} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java new file mode 100644 index 000000000000..204fc48c8dbf --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/cloudwatch/CloudWatchNettyAsyncStabilityTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.stability.tests.cloudwatch; + + +import java.time.Duration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +public class CloudWatchNettyAsyncStabilityTest extends CloudWatchBaseStabilityTest { + private static String namespace; + private static CloudWatchAsyncClient cloudWatchAsyncClient; + + @Override + protected CloudWatchAsyncClient getTestClient() { return cloudWatchAsyncClient; } + + @Override + protected String getNamespace() { return namespace; } + + @BeforeAll + public static void setup() { + namespace = "CloudWatchNettyAsyncStabilityTest" + System.currentTimeMillis(); + cloudWatchAsyncClient = + CloudWatchAsyncClient.builder() + .httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(CONCURRENCY)) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b + // Retry at test level + .retryPolicy(RetryPolicy.none()) + .apiCallTimeout(Duration.ofMinutes(1))) + .build(); + } + + @AfterAll + public static void tearDown() { + cloudWatchAsyncClient.close(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void putMetrics_lowTpsLongInterval() { + putMetrics(); + } +} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncStabilityTest.java deleted file mode 100644 index 9b5b94398f7b..000000000000 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3AsyncStabilityTest.java 
+++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.stability.tests.s3; - - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.time.Duration; -import java.util.concurrent.CompletableFuture; -import java.util.function.IntFunction; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.async.AsyncResponseTransformer; -import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; -import software.amazon.awssdk.stability.tests.utils.RetryableTest; -import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; -import software.amazon.awssdk.testutils.RandomTempFile; -import software.amazon.awssdk.utils.Logger; - -public class S3AsyncStabilityTest extends S3BaseStabilityTest { - private static final Logger LOGGER = Logger.loggerFor(S3AsyncStabilityTest.class); - private static String bucketName = "s3asyncstabilitytests" + System.currentTimeMillis(); - - @BeforeAll - public static void setup() { - s3NettyClient.createBucket(b -> b.bucket(bucketName)).join(); - } - - @AfterAll - public static void cleanup() { - deleteBucketAndAllContents(bucketName); - s3NettyClient.close(); - } - - @RetryableTest(maxRetries = 3, retryableException 
= StabilityTestsRetryableException.class) - @Override - public void putObject_getObject_highConcurrency() { - putObject(); - getObject(); - } - - @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) - public void largeObject_put_get_usingFile() { - uploadLargeObjectFromFile(); - downloadLargeObjectToFile(); - } - - @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) - public void getBucketAcl_lowTpsLongInterval() { - IntFunction> future = i -> s3NettyClient.getBucketAcl(b -> b.bucket(bucketName)); - StabilityTestRunner.newRunner() - .testName("S3AsyncStabilityTest.getBucketAcl_lowTpsLongInterval") - .futureFactory(future) - .requestCountPerRun(10) - .totalRuns(3) - .delaysBetweenEachRun(Duration.ofSeconds(6)) - .run(); - } - - private void downloadLargeObjectToFile() { - File randomTempFile = RandomTempFile.randomUncreatedFile(); - StabilityTestRunner.newRunner() - .testName("S3AsyncStabilityTest.downloadLargeObjectToFile") - .futures(s3NettyClient.getObject(b -> b.bucket(bucketName).key(LARGE_KEY_NAME), - AsyncResponseTransformer.toFile(randomTempFile))) - .run(); - randomTempFile.delete(); - } - - private void uploadLargeObjectFromFile() { - RandomTempFile file = null; - try { - file = new RandomTempFile((long) 2e+9); - StabilityTestRunner.newRunner() - .testName("S3AsyncStabilityTest.uploadLargeObjectFromFile") - .futures(s3NettyClient.putObject(b -> b.bucket(bucketName).key(LARGE_KEY_NAME), - AsyncRequestBody.fromFile(file))) - .run(); - } catch (IOException e) { - throw new RuntimeException("fail to create test file", e); - } finally { - if (file != null) { - file.delete(); - } - } - } - - private void putObject() { - LOGGER.info(() -> "Starting to test putObject"); - byte[] bytes = RandomStringUtils.randomAlphanumeric(10_000).getBytes(); - - IntFunction> future = i -> { - String keyName = computeKeyName(i); - return s3NettyClient.putObject(b -> b.bucket(bucketName).key(keyName), - 
AsyncRequestBody.fromBytes(bytes)); - }; - - StabilityTestRunner.newRunner() - .testName("S3AsyncStabilityTest.putObject") - .futureFactory(future) - .requestCountPerRun(CONCURRENCY) - .totalRuns(TOTAL_RUNS) - .delaysBetweenEachRun(Duration.ofMillis(100)) - .run(); - } - - private void getObject() { - LOGGER.info(() -> "Starting to test getObject"); - IntFunction> future = i -> { - String keyName = computeKeyName(i); - Path path = RandomTempFile.randomUncreatedFile().toPath(); - return s3NettyClient.getObject(b -> b.bucket(bucketName).key(keyName), AsyncResponseTransformer.toFile(path)); - }; - - StabilityTestRunner.newRunner() - .testName("S3AsyncStabilityTest.getObject") - .futureFactory(future) - .requestCountPerRun(CONCURRENCY) - .totalRuns(TOTAL_RUNS) - .delaysBetweenEachRun(Duration.ofMillis(100)) - .run(); - } -} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3BaseStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3BaseStabilityTest.java index 178f8394c229..2bdcd1f13c6d 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3BaseStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3BaseStabilityTest.java @@ -15,20 +15,26 @@ package software.amazon.awssdk.stability.tests.s3; +import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; -import software.amazon.awssdk.core.retry.RetryPolicy; +import java.util.function.IntFunction; + +import org.apache.commons.lang3.RandomStringUtils; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.http.apache.ApacheHttpClient; -import 
software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; import software.amazon.awssdk.services.s3.model.NoSuchBucketException; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.testutils.service.AwsTestBase; import software.amazon.awssdk.utils.Logger; @@ -39,20 +45,9 @@ public abstract class S3BaseStabilityTest extends AwsTestBase { protected static final int TOTAL_RUNS = 50; protected static final String LARGE_KEY_NAME = "2GB"; - protected static S3AsyncClient s3NettyClient; protected static S3Client s3ApacheClient; static { - s3NettyClient = S3AsyncClient.builder() - .httpClientBuilder(NettyNioAsyncHttpClient.builder() - .maxConcurrency(CONCURRENCY)) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) - // Retry at test level - .retryPolicy(RetryPolicy.none())) - .build(); - - s3ApacheClient = S3Client.builder() .httpClientBuilder(ApacheHttpClient.builder() .maxConnections(CONCURRENCY)) @@ -65,19 +60,98 @@ protected String computeKeyName(int i) { return "key_" + i; } - protected static void deleteBucketAndAllContents(String bucketName) { + protected abstract S3AsyncClient getTestClient(); + + protected abstract String getTestBucketName(); + + protected void doGetBucketAcl_lowTpsLongInterval() { + IntFunction> future = i -> getTestClient().getBucketAcl(b -> b.bucket(getTestBucketName())); + String className = this.getClass().getSimpleName(); + StabilityTestRunner.newRunner() + .testName(className + ".getBucketAcl_lowTpsLongInterval") + .futureFactory(future) + .requestCountPerRun(10) + .totalRuns(3) + 
.delaysBetweenEachRun(Duration.ofSeconds(6)) + .run(); + } + + + protected void downloadLargeObjectToFile() { + File randomTempFile = RandomTempFile.randomUncreatedFile(); + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + ".downloadLargeObjectToFile") + .futures(getTestClient().getObject(b -> b.bucket(getTestBucketName()).key(LARGE_KEY_NAME), + AsyncResponseTransformer.toFile(randomTempFile))) + .run(); + randomTempFile.delete(); + } + + protected void uploadLargeObjectFromFile() { + RandomTempFile file = null; + try { + file = new RandomTempFile((long) 2e+9); + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + ".uploadLargeObjectFromFile") + .futures(getTestClient().putObject(b -> b.bucket(getTestBucketName()).key(LARGE_KEY_NAME), + AsyncRequestBody.fromFile(file))) + .run(); + } catch (IOException e) { + throw new RuntimeException("fail to create test file", e); + } finally { + if (file != null) { + file.delete(); + } + } + } + + protected void putObject() { + byte[] bytes = RandomStringUtils.randomAlphanumeric(10_000).getBytes(); + + IntFunction> future = i -> { + String keyName = computeKeyName(i); + return getTestClient().putObject(b -> b.bucket(getTestBucketName()).key(keyName), + AsyncRequestBody.fromBytes(bytes)); + }; + + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + ".putObject") + .futureFactory(future) + .requestCountPerRun(CONCURRENCY) + .totalRuns(TOTAL_RUNS) + .delaysBetweenEachRun(Duration.ofMillis(100)) + .run(); + } + + protected void getObject() { + IntFunction> future = i -> { + String keyName = computeKeyName(i); + Path path = RandomTempFile.randomUncreatedFile().toPath(); + return getTestClient().getObject(b -> b.bucket(getTestBucketName()).key(keyName), AsyncResponseTransformer.toFile(path)); + }; + + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + ".getObject") + .futureFactory(future) + .requestCountPerRun(CONCURRENCY) + .totalRuns(TOTAL_RUNS) + 
.delaysBetweenEachRun(Duration.ofMillis(100)) + .run(); + } + + protected static void deleteBucketAndAllContents(S3AsyncClient client, String bucketName) { try { List> futures = new ArrayList<>(); - s3NettyClient.listObjectsV2Paginator(b -> b.bucket(bucketName)) - .subscribe(r -> r.contents().forEach(s -> futures.add(s3NettyClient.deleteObject(o -> o.bucket(bucketName).key(s.key()))))) + client.listObjectsV2Paginator(b -> b.bucket(bucketName)) + .subscribe(r -> r.contents().forEach(s -> futures.add(client.deleteObject(o -> o.bucket(bucketName).key(s.key()))))) .join(); CompletableFuture[] futureArray = futures.toArray(new CompletableFuture[0]); CompletableFuture.allOf(futureArray).join(); - s3NettyClient.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()).join(); + client.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()).join(); } catch (Exception e) { log.error(() -> "Failed to delete bucket: " +bucketName); } @@ -101,7 +175,4 @@ protected void verifyObjectExist(String bucketName, String keyName, long size) t } } - public abstract void putObject_getObject_highConcurrency(); - - public abstract void largeObject_put_get_usingFile(); } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3CrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3CrtAsyncStabilityTest.java new file mode 100644 index 000000000000..f9860ad9c486 --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3CrtAsyncStabilityTest.java @@ -0,0 +1,77 @@ +package software.amazon.awssdk.stability.tests.s3; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import 
software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +import java.time.Duration; + +public class S3CrtAsyncStabilityTest extends S3BaseStabilityTest { + + private static String bucketName = "s3crtasyncstabilitytests" + System.currentTimeMillis(); + + private static S3AsyncClient s3CrtClient; + private static EventLoopGroup eventLoopGroup; + private static HostResolver hostResolver; + + static { + + int numThreads = Runtime.getRuntime().availableProcessors(); + eventLoopGroup = new EventLoopGroup(numThreads); + hostResolver = new HostResolver(eventLoopGroup); + + SdkAsyncHttpClient.Builder httpClientBuilder = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(eventLoopGroup) + .hostResolver(hostResolver) + .connectionMaxIdleTime(Duration.ofSeconds(5)); + + s3CrtClient = S3AsyncClient.builder() + .httpClientBuilder(httpClientBuilder) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) + // Retry at test level + .retryPolicy(RetryPolicy.none())) + .build(); + } + + @BeforeAll + public static void setup() { + s3CrtClient.createBucket(b -> b.bucket(bucketName)).join(); + } + + @AfterAll + public static void cleanup() { + deleteBucketAndAllContents(s3CrtClient, bucketName); + s3CrtClient.close(); + } + + @Override + protected S3AsyncClient getTestClient() { return s3CrtClient; } + + @Override + protected String getTestBucketName() { return bucketName; } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void putObject_getObject_highConcurrency() { + putObject(); + getObject(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void largeObject_put_get_usingFile() { + 
uploadLargeObjectFromFile(); + downloadLargeObjectToFile(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void getBucketAcl_lowTpsLongInterval_Crt() { + doGetBucketAcl_lowTpsLongInterval(); + } +} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java new file mode 100644 index 000000000000..54fd65ddfa39 --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/s3/S3NettyAsyncStabilityTest.java @@ -0,0 +1,63 @@ +package software.amazon.awssdk.stability.tests.s3; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +import java.time.Duration; + +public class S3NettyAsyncStabilityTest extends S3BaseStabilityTest { + + private static String bucketName = "s3nettyasyncstabilitytests" + System.currentTimeMillis(); + + private static S3AsyncClient s3NettyClient; + + static { + s3NettyClient = S3AsyncClient.builder() + .httpClientBuilder(NettyNioAsyncHttpClient.builder() + .maxConcurrency(CONCURRENCY)) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) + // Retry at test level + .retryPolicy(RetryPolicy.none())) + .build(); + } + + @BeforeAll + public static void setup() { + s3NettyClient.createBucket(b -> b.bucket(bucketName)).join(); + } + + @AfterAll + public static void cleanup() { + deleteBucketAndAllContents(s3NettyClient, bucketName); + s3NettyClient.close(); + } 
+ + @Override + protected S3AsyncClient getTestClient() { return s3NettyClient; } + + @Override + protected String getTestBucketName() { return bucketName; } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void putObject_getObject_highConcurrency() { + putObject(); + getObject(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void largeObject_put_get_usingFile() { + uploadLargeObjectFromFile(); + downloadLargeObjectToFile(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void getBucketAcl_lowTpsLongInterval_Netty() { + doGetBucketAcl_lowTpsLongInterval(); + } +} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsAsyncStabilityTest.java deleted file mode 100644 index a2c881e86814..000000000000 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsAsyncStabilityTest.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.stability.tests.sqs; - -import java.time.Duration; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.function.IntFunction; -import java.util.stream.Collectors; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import software.amazon.awssdk.services.sqs.model.CreateQueueResponse; -import software.amazon.awssdk.services.sqs.model.DeleteMessageBatchRequestEntry; -import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; -import software.amazon.awssdk.stability.tests.utils.RetryableTest; -import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; -import software.amazon.awssdk.utils.Logger; - -public class SqsAsyncStabilityTest extends SqsBaseStabilityTest { - private static final Logger log = Logger.loggerFor(SqsAsyncStabilityTest.class); - private static String queueName; - private static String queueUrl; - - @BeforeAll - public static void setup() { - queueName = "sqsasyncstabilitytests" + System.currentTimeMillis(); - CreateQueueResponse createQueueResponse = sqsAsyncClient.createQueue(b -> b.queueName(queueName)).join(); - queueUrl = createQueueResponse.queueUrl(); - } - - @AfterAll - public static void tearDown() { - if (queueUrl != null) { - sqsAsyncClient.deleteQueue(b -> b.queueUrl(queueUrl)); - } - sqsAsyncClient.close(); - } - - @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) - public void sendMessage_receiveMessage() { - sendMessage(); - receiveMessage(); - } - - private void sendMessage() { - log.info(() -> String.format("Starting testing sending messages to queue %s with queueUrl %s", queueName, queueUrl)); - String messageBody = RandomStringUtils.randomAscii(1000); - IntFunction> futureIntFunction = - i -> sqsAsyncClient.sendMessage(b -> b.queueUrl(queueUrl).messageBody(messageBody)); - - 
runSqsTests("sendMessage", futureIntFunction); - } - - private void receiveMessage() { - log.info(() -> String.format("Starting testing receiving messages from queue %s with queueUrl %s", queueName, queueUrl)); - IntFunction> futureIntFunction = - i -> sqsAsyncClient.receiveMessage(b -> b.queueUrl(queueUrl)) - .thenApply( - r -> { - List batchRequestEntries = - r.messages().stream().map(m -> DeleteMessageBatchRequestEntry.builder().id(m.messageId()).receiptHandle(m.receiptHandle()).build()) - .collect(Collectors.toList()); - return sqsAsyncClient.deleteMessageBatch(b -> b.queueUrl(queueUrl).entries(batchRequestEntries)); - }); - runSqsTests("receiveMessage", futureIntFunction); - } - - private void runSqsTests(String testName, IntFunction> futureIntFunction) { - StabilityTestRunner.newRunner() - .testName("SqsAsyncStabilityTest." + testName) - .futureFactory(futureIntFunction) - .totalRuns(TOTAL_RUNS) - .requestCountPerRun(CONCURRENCY) - .delaysBetweenEachRun(Duration.ofMillis(100)) - .run(); - } -} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsBaseStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsBaseStabilityTest.java index 18a52afd56e1..04bcc6180e42 100644 --- a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsBaseStabilityTest.java +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsBaseStabilityTest.java @@ -16,29 +16,71 @@ package software.amazon.awssdk.stability.tests.sqs; import java.time.Duration; -import software.amazon.awssdk.http.apache.ApacheHttpClient; -import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.IntFunction; +import java.util.stream.Collectors; + +import org.apache.commons.lang3.RandomStringUtils; import software.amazon.awssdk.services.sqs.SqsAsyncClient; -import 
software.amazon.awssdk.services.sqs.SqsClient; +import software.amazon.awssdk.services.sqs.model.CreateQueueResponse; +import software.amazon.awssdk.services.sqs.model.DeleteMessageBatchRequestEntry; +import software.amazon.awssdk.stability.tests.utils.StabilityTestRunner; import software.amazon.awssdk.testutils.service.AwsTestBase; +import software.amazon.awssdk.utils.Logger; public abstract class SqsBaseStabilityTest extends AwsTestBase { + private static final Logger log = Logger.loggerFor(SqsBaseStabilityTest.class); protected static final int CONCURRENCY = 100; - protected static final int TOTAL_REQUEST_NUMBER = 5000; protected static final int TOTAL_RUNS = 50; - protected static SqsAsyncClient sqsAsyncClient = SqsAsyncClient.builder() - .httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(CONCURRENCY)) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(1))) - .build(); - protected static SqsClient sqsClient = SqsClient.builder() - .httpClientBuilder(ApacheHttpClient.builder().maxConnections(CONCURRENCY)) - .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(1))) - .build(); + protected abstract SqsAsyncClient getTestClient(); + protected abstract String getQueueUrl(); + protected abstract String getQueueName(); + + protected static String setup(SqsAsyncClient client, String queueName) { + CreateQueueResponse createQueueResponse = client.createQueue(b -> b.queueName(queueName)).join(); + return createQueueResponse.queueUrl(); + } + + protected static void tearDown(SqsAsyncClient client, String queueUrl) { + if (queueUrl != null) { + client.deleteQueue(b -> b.queueUrl(queueUrl)); + } + } + + protected void sendMessage() { + log.info(() -> String.format("Starting testing sending messages to queue %s with queueUrl %s", getQueueName(), getQueueUrl())); + String messageBody = RandomStringUtils.randomAscii(1000); + IntFunction> futureIntFunction = + i -> 
getTestClient().sendMessage(b -> b.queueUrl(getQueueUrl()).messageBody(messageBody)); + + runSqsTests("sendMessage", futureIntFunction); + } + protected void receiveMessage() { + log.info(() -> String.format("Starting testing receiving messages from queue %s with queueUrl %s", getQueueName(), getQueueUrl())); + IntFunction> futureIntFunction = + i -> getTestClient().receiveMessage(b -> b.queueUrl(getQueueUrl())) + .thenApply( + r -> { + List batchRequestEntries = + r.messages().stream().map(m -> DeleteMessageBatchRequestEntry.builder().id(m.messageId()).receiptHandle(m.receiptHandle()).build()) + .collect(Collectors.toList()); + return getTestClient().deleteMessageBatch(b -> b.queueUrl(getQueueUrl()).entries(batchRequestEntries)); + }); + runSqsTests("receiveMessage", futureIntFunction); + } - public abstract void sendMessage_receiveMessage(); + private void runSqsTests(String testName, IntFunction> futureIntFunction) { + StabilityTestRunner.newRunner() + .testName(getClass().getSimpleName() + "." + testName) + .futureFactory(futureIntFunction) + .totalRuns(TOTAL_RUNS) + .requestCountPerRun(CONCURRENCY) + .delaysBetweenEachRun(Duration.ofMillis(100)) + .run(); + } } diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java new file mode 100644 index 000000000000..26468b5f921c --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsCrtAsyncStabilityTest.java @@ -0,0 +1,80 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.stability.tests.sqs; + +import java.time.Duration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.crt.io.EventLoopGroup; +import software.amazon.awssdk.crt.io.HostResolver; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.services.sqs.SqsAsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +public class SqsCrtAsyncStabilityTest extends SqsBaseStabilityTest { + private static String queueName; + private static String queueUrl; + + private static SqsAsyncClient sqsAsyncClient; + + @Override + protected SqsAsyncClient getTestClient() { return sqsAsyncClient; } + + @Override + protected String getQueueUrl() { return queueUrl; } + + @Override + protected String getQueueName() { return queueName; } + + @BeforeAll + public static void setup() { + int numThreads = Runtime.getRuntime().availableProcessors(); + try (EventLoopGroup eventLoopGroup = new EventLoopGroup(numThreads); + HostResolver hostResolver = new HostResolver(eventLoopGroup)) { + + SdkAsyncHttpClient.Builder crtClientBuilder = AwsCrtAsyncHttpClient.builder() + .eventLoopGroup(eventLoopGroup) + .hostResolver(hostResolver) + .connectionMaxIdleTime(Duration.ofSeconds(5)); + + sqsAsyncClient = SqsAsyncClient.builder() + .httpClientBuilder(crtClientBuilder) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(10)) + // Retry at test level + 
.retryPolicy(RetryPolicy.none())) + .build(); + } + + queueName = "sqscrtasyncstabilitytests" + System.currentTimeMillis(); + queueUrl = setup(sqsAsyncClient, queueName); + } + + @AfterAll + public static void tearDown() { + tearDown(sqsAsyncClient, queueUrl); + sqsAsyncClient.close(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void sendMessage_receiveMessage() { + sendMessage(); + receiveMessage(); + } +} diff --git a/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsNettyAsyncStabilityTest.java b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsNettyAsyncStabilityTest.java new file mode 100644 index 000000000000..7cdc3dee773a --- /dev/null +++ b/test/stability-tests/src/it/java/software/amazon/awssdk/stability/tests/sqs/SqsNettyAsyncStabilityTest.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.stability.tests.sqs; + +import java.time.Duration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.sqs.SqsAsyncClient; +import software.amazon.awssdk.stability.tests.exceptions.StabilityTestsRetryableException; +import software.amazon.awssdk.stability.tests.utils.RetryableTest; + +public class SqsNettyAsyncStabilityTest extends SqsBaseStabilityTest { + private static String queueName; + private static String queueUrl; + + private static SqsAsyncClient sqsAsyncClient; + + @Override + protected SqsAsyncClient getTestClient() { return sqsAsyncClient; } + + @Override + protected String getQueueUrl() { return queueUrl; } + + @Override + protected String getQueueName() { return queueName; } + + @BeforeAll + public static void setup() { + sqsAsyncClient = SqsAsyncClient.builder() + .httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(CONCURRENCY)) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(1))) + .build(); + queueName = "sqsnettyasyncstabilitytests" + System.currentTimeMillis(); + queueUrl = setup(sqsAsyncClient, queueName); + } + + @AfterAll + public static void tearDown() { + tearDown(sqsAsyncClient, queueUrl); + sqsAsyncClient.close(); + } + + @RetryableTest(maxRetries = 3, retryableException = StabilityTestsRetryableException.class) + public void sendMessage_receiveMessage() { + sendMessage(); + receiveMessage(); + } +} diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index eb4d7d7d4092..1c68e98952af 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml test-utils @@ -43,11 +43,6 @@ - - software.amazon.awssdk - annotations - ${awsjavasdk.version} - 
software.amazon.awssdk utils diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/AcceptorPathMatcher.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/AcceptorPathMatcher.java deleted file mode 100644 index 5e6abf7a840b..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/AcceptorPathMatcher.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import com.fasterxml.jackson.databind.JsonNode; -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public final class AcceptorPathMatcher { - - private AcceptorPathMatcher() { - } - - /** - * PathAll matcher that checks if each element of the final - * result matches the expected result. 
- * - * @param expectedResult Expected result given by the waiter definition - * @param finalResult Final result of the resource got by the execution - * of the JmesPath expression given by the waiter - * definition - * @return True if all elements of the final result matches - * the expected result, False otherwise - */ - public static boolean pathAll(JsonNode expectedResult, JsonNode finalResult) { - if (finalResult.isNull()) { - return false; - } - if (!finalResult.isArray()) { - throw new RuntimeException("Expected an array"); - } - for (JsonNode element : finalResult) { - if (!element.equals(expectedResult)) { - return false; - } - } - return true; - } - - /** - * PathAny matcher that checks if any element of the final - * result matches the expected result. - * - * @param expectedResult Expected result given by the waiter definition - * @param finalResult Final result of the resource got by the execution - * of the JmesPath expression given by the waiter - * definition - * @return True if any single element of the final result matches - * the expected result, False if none matched - */ - public static boolean pathAny(JsonNode expectedResult, JsonNode finalResult) { - if (finalResult.isNull()) { - return false; - } - if (!finalResult.isArray()) { - throw new RuntimeException("Expected an array"); - } - for (JsonNode element : finalResult) { - if (element.equals(expectedResult)) { - return true; - } - } - return false; - - } - - /** - * Path matcher that checks if the final result - * matches the expected result. 
- * - * @param expectedResult Expected result given by the waiter definition - * @param finalResult Final result of the resource got by the execution - * of the JmesPath expression given by the waiter - * definition - * @return True if the final result matches the expected result, - * False otherwise - */ - public static boolean path(JsonNode expectedResult, JsonNode finalResult) { - return finalResult.equals(expectedResult); - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/CompositeAcceptor.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/CompositeAcceptor.java deleted file mode 100644 index 7a06074a335b..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/CompositeAcceptor.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import java.util.ArrayList; -import java.util.List; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.utils.Validate; - -@SdkInternalApi -class CompositeAcceptor { - - /** - * List of acceptors defined for each waiter - */ - private List> acceptors = new ArrayList<>(); - - /** - * Constructs a new Composite Acceptor with the given list of acceptors. - * Throws an assertion exception if the acceptor list is empty or null - * - * @param acceptors List of acceptors defined for each waiter. 
It shouldn't - * be null or empty - */ - CompositeAcceptor(List> acceptors) { - this.acceptors = Validate.paramNotNull(acceptors, "acceptors"); - } - - /** - * @return List of acceptors defined for each waiter - */ - public List> getAcceptors() { - return this.acceptors; - } - - /** - * Compares the response against each response acceptor and returns - * the state of the acceptor it matches on. If none is matched, returns - * retry state by default - * - * @param response Response object got by executing the specified - * waiter operation - * @return (Enum) Corresponding waiter state defined by the acceptor or - * retry state if none matched - */ - public WaiterState accepts(OutputT response) { - for (WaiterAcceptor acceptor : acceptors) { - if (acceptor.matches(response)) { - return acceptor.getState(); - } - } - return WaiterState.RETRY; - - } - - /** - * Compares the exception thrown against each exception acceptor and - * returns the state of the acceptor it matches on. If none is - * matched, it rethrows the exception to the caller - * - * @param exception Exception thrown by executing the specified - * waiter operation - * @return (Enum) Corresponding waiter state defined by the acceptor or - * rethrows the exception back to the caller if none matched - */ - public WaiterState accepts(ErrorT exception) { - for (WaiterAcceptor acceptor : acceptors) { - if (acceptor.matches(exception)) { - return acceptor.getState(); - } - } - throw exception; - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/FixedDelayStrategy.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/FixedDelayStrategy.java deleted file mode 100644 index 5fd8cceb92a6..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/FixedDelayStrategy.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -public class FixedDelayStrategy implements PollingStrategy.DelayStrategy { - - /** - * Represents default delay time in seconds - */ - private final int defaultDelayInSeconds; - - /** - * Constructs a new FixedDelayStrategy with the given - * default delay time - * - */ - public FixedDelayStrategy(int defaultDelayInSeconds) { - this.defaultDelayInSeconds = defaultDelayInSeconds; - } - - /** - * Defines a default way of delaying the polling bound by - * the default delay associated with the corresponding - * waiter definition - * - * @param pollingStrategyContext Provides the polling context required to define custom delay - */ - @Override - public void delayBeforeNextRetry(PollingStrategyContext pollingStrategyContext) throws InterruptedException { - Thread.sleep(1000L * defaultDelayInSeconds); - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/MaxAttemptsRetryStrategy.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/MaxAttemptsRetryStrategy.java deleted file mode 100644 index 02db1c7d139f..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/MaxAttemptsRetryStrategy.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -public class MaxAttemptsRetryStrategy implements PollingStrategy.RetryStrategy { - - /** - * Represents default number of maximum attempts allowed - * for polling - */ - private final int defaultMaxAttempts; - - /** - * Constructs a new MaxAttemptsRetryStrategy with the given - * default number of attempts - * - */ - public MaxAttemptsRetryStrategy(int defaultMaxAttempts) { - this.defaultMaxAttempts = defaultMaxAttempts; - } - - /** - * Default way of checking if polling should be retried - * or fast failed - * - * @param pollingStrategyContext Provides the polling context required to make the retry decision - * @return false if the number of re - */ - @Override - public boolean shouldRetry(PollingStrategyContext pollingStrategyContext) { - return pollingStrategyContext.getRetriesAttempted() < defaultMaxAttempts; - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategy.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategy.java deleted file mode 100644 index ce2e1030b4f7..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategy.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -public class PollingStrategy { - - /** - * Condition on whether polling should be retried. - */ - private final RetryStrategy retryStrategy; - - /** - * Delay strategy to control the sleep time between retry attempts. - */ - private final DelayStrategy delayStrategy; - - /** - * Constructs a new PollingStrategy with RetryStrategy - * and DelayStrategy defined - * - * @param retryStrategy Retry condition on whether polling should be retried. - * If null value is specified, the SDK' default retry - * condition is used. - */ - public PollingStrategy(RetryStrategy retryStrategy, DelayStrategy delayStrategy) { - this.retryStrategy = retryStrategy; - this.delayStrategy = delayStrategy; - } - - /** - * Returns the retry condition included in this Polling Strategy. - * - * @return The retry condition included in this Polling Strategy. - */ - RetryStrategy getRetryStrategy() { - return this.retryStrategy; - } - - /** - * Returns the delay strategy included in this Polling Strategy. - * - * @return The delay strategy included in this Polling Strategy. - */ - DelayStrategy getDelayStrategy() { - return this.delayStrategy; - } - - /** - * The hook for providing custom condition on whether polling of a resource - * should be retried. - */ - public interface RetryStrategy { - /** - * Returns whether polling of a resource should be retried according to the - * given polling context. - * - * @param pollingStrategyContext Provides the polling context required to make the retry decision - * @return True if it should be retried. 
- * @see PollingStrategyContext - */ - boolean shouldRetry(PollingStrategyContext pollingStrategyContext); - } - - /** - * The hook for providing custom delay strategy to control the sleep time - * between retries. - */ - public interface DelayStrategy { - - /** - * Defines the custom delay strategy to control the sleep time - * - * @param pollingStrategyContext Provides the polling context required to define custom delay - * @see PollingStrategyContext - */ - void delayBeforeNextRetry(PollingStrategyContext pollingStrategyContext) throws InterruptedException; - - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategyContext.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategyContext.java deleted file mode 100644 index 0a250a03a21f..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/PollingStrategyContext.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public class PollingStrategyContext { - - /** - * Represents the original input of the operation. 
- */ - private final Object originalRequest; - - /** - * Represents the number of retries made so far - */ - private final int retriesAttempted; - - /** - * Constructs a new polling strategy context with the given - * request and retries attempted required for custom polling - */ - PollingStrategyContext(Object originalRequest, int retriesAttempted) { - this.originalRequest = originalRequest; - this.retriesAttempted = retriesAttempted; - } - - /** - * @return Original input of the operation. - */ - public Object getOriginalRequest() { - return originalRequest; - } - - /** - * @return Number of retries attempted - */ - public int getRetriesAttempted() { - return retriesAttempted; - } - -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/Waiter.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/Waiter.java deleted file mode 100644 index 38d83873df94..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/Waiter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import java.util.concurrent.Future; - -public interface Waiter { - - /** - * Polls synchronously until it is determined that the resource - * transitioned into the desired state or not. - * - * @param waiterParameters Custom provided parameters. 
Includes request and - * optional custom polling strategy - * @throws WaiterUnrecoverableException If the resource transitions into a failure/unexpected state. - * @throws WaiterTimedOutException If the resource doesn't transition into the desired state - * even after a certain number of retries. - */ - void run(WaiterParameters waiterParameters) - throws WaiterTimedOutException, WaiterUnrecoverableException; - - /** - * Polls asynchronously until it is determined that the resource - * transitioned into the desired state or not. Includes additional - * callback. - * - * @param waiterParameters Custom provided parameters. Includes request and - * optional custom polling strategy - * @param callback Custom callback - * @return Future object that holds the result of an asynchronous - * computation of waiter - */ - Future runAsync(WaiterParameters waiterParameters, WaiterHandler callback) - throws WaiterTimedOutException, WaiterUnrecoverableException; -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterAcceptor.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterAcceptor.java deleted file mode 100644 index 9970db2529f0..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterAcceptor.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.core.waiters; - -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public abstract class WaiterAcceptor { - - /** - * Default method definition that matches the response - * state with the expected state defined by the acceptor. - * Overridden by each acceptor definition of matches. - * - * @param output Response got by the execution of the operation - * @return False by default. - * When overridden, returns True if it matches, False - * otherwise - */ - public boolean matches(OutputT output) { - return false; - } - - /** - * Default method definition that matches the exception - * with the expected state defined by the acceptor. - * Overridden by each acceptor definition of matches. - * - * @param output Exception thrown by the execution of the operation - * @return False by default. - * When overridden, returns True if it matches, False otherwise - */ - public boolean matches(ErrorT output) { - return false; - } - - /** - * Abstract method to fetch the corresponding state - * - * @return Corresponding state of the resource - */ - public abstract WaiterState getState(); -} - - diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterBuilder.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterBuilder.java deleted file mode 100644 index abcae155a5f1..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterBuilder.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ExecutorService; -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public class WaiterBuilder { - - private SdkFunction sdkFunction; - - private List> acceptors = new ArrayList<>(); - - private PollingStrategy defaultPollingStrategy; - - private ExecutorService executorService; - - public WaiterBuilder withSdkFunction(SdkFunction sdkFunction) { - this.sdkFunction = sdkFunction; - return this; - } - - public WaiterBuilder withAcceptors(WaiterAcceptor... acceptors) { - Collections.addAll(this.acceptors, acceptors); - return this; - } - - public WaiterBuilder withDefaultPollingStrategy(PollingStrategy pollingStrategy) { - this.defaultPollingStrategy = pollingStrategy; - return this; - } - - public WaiterBuilder withExecutorService(ExecutorService executorService) { - this.executorService = executorService; - return this; - } - - public List> getAcceptor() { - return this.acceptors; - } - - public SdkFunction getSdkFunction() { - return this.sdkFunction; - } - - PollingStrategy getDefaultPollingStrategy() { - return this.defaultPollingStrategy; - } - - public ExecutorService getExecutorService() { - return this.executorService; - } - - public Waiter build() { - return new WaiterImpl<>(this); - } - -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecution.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecution.java deleted file mode 100644 index 1f2762e21313..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecution.java +++ /dev/null 
@@ -1,121 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.utils.Validate; - -@SdkProtectedApi -public class WaiterExecution { - - /** - * Resource specific function that makes a call to the - * operation specified by the waiter - */ - private final SdkFunction sdkFunction; - - /** - * Represents the input of the operation. 
- */ - private final InputT request; - - /** - * List of acceptors defined for each waiter - */ - private final CompositeAcceptor acceptor; - - /** - * Custom polling strategy as given by the end users - */ - private final PollingStrategy pollingStrategy; - - /** - * Constructs a new waiter with all the parameters defined - * in the WaiterExecutionBuilder - * - * @param waiterExecutionBuilder Contains all the parameters required to construct a - * new waiter - */ - public WaiterExecution(WaiterExecutionBuilder waiterExecutionBuilder) { - this.sdkFunction = Validate.paramNotNull(waiterExecutionBuilder.getSdkFunction(), "sdkFunction"); - this.request = Validate.paramNotNull(waiterExecutionBuilder.getRequest(), "request"); - this.acceptor = new CompositeAcceptor<>(Validate.paramNotNull(waiterExecutionBuilder.getAcceptorsList(), "acceptors")); - this.pollingStrategy = Validate.paramNotNull(waiterExecutionBuilder.getPollingStrategy(), "pollingStrategy"); - } - - /** - * Polls until a specified resource transitions into either success or failure state or - * until the specified number of retries has been made. - * - * @return True if the resource transitions into desired state. - * @throws WaiterUnrecoverableException If the resource transitions into a failure/unexpected state. - * @throws WaiterTimedOutException If the resource doesn't transition into the desired state - * even after a certain number of retries. 
- */ - public boolean pollResource() throws WaiterTimedOutException, WaiterUnrecoverableException { - int retriesAttempted = 0; - while (true) { - switch (getCurrentState()) { - case SUCCESS: - return true; - case FAILURE: - throw new WaiterUnrecoverableException("Resource never entered the desired state as it failed."); - case RETRY: - PollingStrategyContext pollingStrategyContext = new PollingStrategyContext(request, retriesAttempted); - if (pollingStrategy.getRetryStrategy().shouldRetry(pollingStrategyContext)) { - safeCustomDelay(pollingStrategyContext); - retriesAttempted++; - } else { - throw new WaiterTimedOutException("Reached maximum attempts without transitioning to the desired state"); - } - break; - default: - // Ignore - } - } - } - - /** - * Fetches the current state of the resource based on the acceptor it matches - * - * @return Current state of the resource - */ - private WaiterState getCurrentState() { - try { - return acceptor.accepts(sdkFunction.apply(request)); - } catch (RuntimeException amazonServiceException) { - return acceptor.accepts((ErrorT) amazonServiceException); - } - - } - - /** - * Calls the custom delay strategy to control the sleep time - * - * @param pollingStrategyContext Provides the polling strategy context. - * Includes request and number of retries - * attempted so far. 
- */ - private void safeCustomDelay(PollingStrategyContext pollingStrategyContext) { - try { - pollingStrategy.getDelayStrategy().delayBeforeNextRetry(pollingStrategyContext); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException(e); - } - } - -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecutionBuilder.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecutionBuilder.java deleted file mode 100644 index 7d7ba9c6601c..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterExecutionBuilder.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.core.waiters; - -import java.util.ArrayList; -import java.util.List; -import software.amazon.awssdk.annotations.SdkProtectedApi; - -@SdkProtectedApi -public class WaiterExecutionBuilder { - - private SdkFunction sdkFunction; - - private InputT request; - - private PollingStrategy pollingStrategy; - - private List> acceptors = new ArrayList<>(); - - public WaiterExecutionBuilder withSdkFunction(SdkFunction sdkFunction) { - this.sdkFunction = sdkFunction; - return this; - } - - public WaiterExecutionBuilder withRequest(InputT request) { - this.request = request; - return this; - } - - - public WaiterExecutionBuilder withPollingStrategy(PollingStrategy pollingStrategy) { - this.pollingStrategy = pollingStrategy; - return this; - } - - public WaiterExecutionBuilder withAcceptors(List> acceptors) { - this.acceptors = acceptors; - return this; - } - - public InputT getRequest() { - return this.request; - } - - public List> getAcceptorsList() { - return this.acceptors; - } - - public SdkFunction getSdkFunction() { - return this.sdkFunction; - } - - public PollingStrategy getPollingStrategy() { - return this.pollingStrategy; - } - - public WaiterExecution build() { - return new WaiterExecution<>(this); - } - -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterImpl.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterImpl.java deleted file mode 100644 index 98a6804cf2b6..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterImpl.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import software.amazon.awssdk.annotations.SdkProtectedApi; -import software.amazon.awssdk.utils.Validate; - -@SdkProtectedApi -public class WaiterImpl implements Waiter { - - /** - * Represents the operation function - */ - private final SdkFunction sdkFunction; - - /** - * List of acceptors - */ - private final List> acceptors; - - /** - * Represents the default polling strategy - */ - private final PollingStrategy defaultPollingStrategy; - - private final ExecutorService executorService; - - /** - * Constructs a new waiter with the given internal parameters - * - * @param waiterBuilder Takes in default parameters and builds a - * basic waiter. Excludes request and custom - * polling strategy parameters. - */ - @SdkProtectedApi - public WaiterImpl(WaiterBuilder waiterBuilder) { - this.sdkFunction = Validate.paramNotNull(waiterBuilder.getSdkFunction(), "sdkFunction"); - this.acceptors = Validate.paramNotNull(waiterBuilder.getAcceptor(), "acceptors"); - this.defaultPollingStrategy = Validate.paramNotNull(waiterBuilder.getDefaultPollingStrategy(), "defaultPollingStrategy"); - this.executorService = Validate.paramNotNull(waiterBuilder.getExecutorService(), "executorService"); - } - - /** - * Polls synchronously until it is determined that the resource - * transitioned into the desired state or not. - * - * @param waiterParameters Custom provided parameters. Includes request and - * optional custom polling strategy - * @throws WaiterUnrecoverableException If the resource transitions into a failure/unexpected state. 
- * @throws WaiterTimedOutException If the resource doesn't transition into the desired state - * even after a certain number of retries. - */ - public void run(WaiterParameters waiterParameters) - throws WaiterTimedOutException, WaiterUnrecoverableException { - - Validate.paramNotNull(waiterParameters, "waiterParameters"); - @SuppressWarnings("unchecked") - InputT request = Validate.paramNotNull(waiterParameters.getRequest(), "request"); - WaiterExecution waiterExecution = new WaiterExecutionBuilder() - .withRequest(request) - .withPollingStrategy(waiterParameters.getPollingStrategy() != null ? waiterParameters.getPollingStrategy() - : defaultPollingStrategy) - .withAcceptors(acceptors) - .withSdkFunction(sdkFunction) - .build(); - - waiterExecution.pollResource(); - - } - - /** - * Polls asynchronously until it is determined that the resource - * transitioned into the desired state or not. Includes additional - * callback. - * - * @param waiterParameters Custom provided parameters. Includes request and - * optional custom polling strategy - * @param callback Custom callback - * @return Future object that holds the result of an asynchronous - * computation of waiter - */ - public Future runAsync(final WaiterParameters waiterParameters, final WaiterHandler callback) - throws WaiterTimedOutException, WaiterUnrecoverableException { - - return executorService.submit(() -> { - try { - run(waiterParameters); - callback.onWaitSuccess(waiterParameters.getRequest()); - } catch (Exception ex) { - callback.onWaitFailure(ex); - - throw ex; - } - return null; - }); - - } -} diff --git a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterParameters.java b/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterParameters.java deleted file mode 100644 index c038420bdaa5..000000000000 --- a/test/test-utils/src/main/java/software/amazon/awssdk/core/waiters/WaiterParameters.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.core.waiters; - -public final class WaiterParameters { - - /** - * Represents the input of the operation. - */ - private final InputT request; - - /** - * Represents the custom polling strategy. Will be null - * if not defined - */ - private final PollingStrategy pollingStrategy; - - public WaiterParameters() { - this.request = null; - this.pollingStrategy = null; - } - - public WaiterParameters(InputT request) { - this.request = request; - this.pollingStrategy = null; - } - - /** - * Constructs a new WaiterParameters with the given request - * and polling strategy - * - * @param request Input of the operation - * @param pollingStrategy Custom polling strategy - * @return WaiterParameters object with custom polling strategy defined - */ - private WaiterParameters(InputT request, PollingStrategy pollingStrategy) { - this.request = request; - this.pollingStrategy = pollingStrategy; - } - - /** - * Constructs a new WaiterParameters with the given request - * - * @param request Input of the operation - * @return WaiterParameters Object - */ - public WaiterParameters withRequest(InputT request) { - return new WaiterParameters<>(request, this.pollingStrategy); - } - - /** - * Constructs a new WaiterParameters with the given polling strategy - * - * @param pollingStrategy Custom polling strategy - * @return WaiterParameters Object - */ - public WaiterParameters withPollingStrategy(PollingStrategy 
pollingStrategy) { - return new WaiterParameters<>(this.request, pollingStrategy); - } - - /** - * @return Input of the operation - */ - public InputT getRequest() { - return this.request; - } - - /** - * @return Custom Polling Strategy - */ - public PollingStrategy getPollingStrategy() { - return this.pollingStrategy; - } -} diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 9f1f1f1c161e..9ea374cee806 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT ../../pom.xml 4.0.0 @@ -155,6 +155,11 @@ dynamodb-enhanced software.amazon.awssdk + ${awsjavasdk.version} + + + cloudwatch-metric-publisher + software.amazon.awssdk ${awsjavasdk.version}-PREVIEW diff --git a/utils/pom.xml b/utils/pom.xml index 0692909c1850..31853f892408 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 0.2.0-PREVIEW + 2.13.56-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/CollectionUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/CollectionUtils.java index 2931eb231de4..640a3eb8f86f 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/CollectionUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/CollectionUtils.java @@ -15,12 +15,9 @@ package software.amazon.awssdk.utils; -import static java.util.Collections.unmodifiableList; -import static java.util.Collections.unmodifiableMap; - import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -78,7 +75,7 @@ public static T firstIfPresent(List list) { * desired. 
*/ public static Map> deepCopyMap(Map> map) { - return deepCopyMap(map, () -> new HashMap<>()); + return deepCopyMap(map, () -> new LinkedHashMap<>()); } /** @@ -87,29 +84,32 @@ public static Map> deepCopyMap(Map> map) * desired. */ public static Map> deepCopyMap(Map> map, Supplier>> mapConstructor) { - return map.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new ArrayList<>(e.getValue()), - CollectionUtils::throwIllegalStateException, mapConstructor)); + Map> result = mapConstructor.get(); + map.forEach((k, v) -> result.put(k, new ArrayList<>(v))); + return result; + } + + public static Map> unmodifiableMapOfLists(Map> map) { + return new UnmodifiableMapOfLists<>(map); } /** * Perform a deep copy of the provided map of lists, and make the result unmodifiable. + * + * This is equivalent to calling {@link #deepCopyMap} followed by {@link #unmodifiableMapOfLists}. */ public static Map> deepUnmodifiableMap(Map> map) { - return deepUnmodifiableMap(map, () -> new HashMap<>()); + return unmodifiableMapOfLists(deepCopyMap(map)); } /** * Perform a deep copy of the provided map of lists, and make the result unmodifiable. + * + * This is equivalent to calling {@link #deepCopyMap} followed by {@link #unmodifiableMapOfLists}. */ public static Map> deepUnmodifiableMap(Map> map, Supplier>> mapConstructor) { - return unmodifiableMap(map.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - e -> unmodifiableList(new ArrayList<>(e.getValue())), - CollectionUtils::throwIllegalStateException, - mapConstructor))); + return unmodifiableMapOfLists(deepCopyMap(map, mapConstructor)); } @@ -126,11 +126,4 @@ public static Map> deepUnmodifiableMap(Map Map mapValues(Map inputMap, Function mapper) { return inputMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> mapper.apply(e.getValue()))); } - - /** - * Dummy merger since there can't be a conflict when collecting from a map. 
- */ - private static T throwIllegalStateException(T left, T right) { - throw new IllegalStateException("Duplicate keys are impossible when collecting from a map"); - } } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/StringUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/StringUtils.java index 5ed50d28b492..2d819b78438d 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/StringUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/StringUtils.java @@ -606,6 +606,121 @@ public static String replacePrefixIgnoreCase(String str, String prefix, String r return str.replaceFirst("(?i)" + prefix, replacement); } + + public static String replaceEach(final String text, final String[] searchList, final String[] replacementList) { + // mchyzer Performance note: This creates very few new objects (one major goal) + // let me know if there are performance requests, we can create a harness to measure + + int searchLength = searchList.length; + int replacementLength = replacementList.length; + + if (isEmpty(text) || (searchLength == 0 && replacementLength == 0)) { + return text; + } + + // make sure lengths are ok, these need to be equal + if (searchLength != replacementLength) { + throw new IllegalArgumentException("Search and Replace array lengths don't match: " + + searchLength + + " vs " + + replacementLength); + } + + // keep track of which still have matches + boolean[] noMoreMatchesForReplIndex = new boolean[searchLength]; + + // index on index that the match was found + int textIndex = -1; + int replaceIndex = -1; + int tempIndex = -1; + + // index of replace array that will replace the search string found + // NOTE: logic duplicated below START + for (int i = 0; i < searchLength; i++) { + if (noMoreMatchesForReplIndex[i] || isEmpty(searchList[i]) || replacementList[i] == null) { + continue; + } + tempIndex = text.indexOf(searchList[i]); + + // see if we need to keep searching for this + if (tempIndex == -1) { + 
noMoreMatchesForReplIndex[i] = true; + } else { + if (textIndex == -1 || tempIndex < textIndex) { + textIndex = tempIndex; + replaceIndex = i; + } + } + } + // NOTE: logic mostly below END + + // no search strings found, we are done + if (textIndex == -1) { + return text; + } + + int start = 0; + + // get a good guess on the size of the result buffer so it doesn't have to double if it goes over a bit + int increase = 0; + + // count the replacement text elements that are larger than their corresponding text being replaced + for (int i = 0; i < searchList.length; i++) { + if (searchList[i] == null || replacementList[i] == null) { + continue; + } + int greater = replacementList[i].length() - searchList[i].length(); + if (greater > 0) { + increase += 3 * greater; // assume 3 matches + } + } + // have upper-bound at 20% increase, then let Java take over + increase = Math.min(increase, text.length() / 5); + + StringBuilder buf = new StringBuilder(text.length() + increase); + + while (textIndex != -1) { + + for (int i = start; i < textIndex; i++) { + buf.append(text.charAt(i)); + } + buf.append(replacementList[replaceIndex]); + + start = textIndex + searchList[replaceIndex].length(); + + textIndex = -1; + replaceIndex = -1; + tempIndex = -1; + // find the next earliest match + // NOTE: logic mostly duplicated above START + for (int i = 0; i < searchLength; i++) { + if (noMoreMatchesForReplIndex[i] || searchList[i] == null || + searchList[i].isEmpty() || replacementList[i] == null) { + continue; + } + tempIndex = text.indexOf(searchList[i], start); + + // see if we need to keep searching for this + if (tempIndex == -1) { + noMoreMatchesForReplIndex[i] = true; + } else { + if (textIndex == -1 || tempIndex < textIndex) { + textIndex = tempIndex; + replaceIndex = i; + } + } + } + // NOTE: logic duplicated above END + + } + + int textLength = text.length(); + for (int i = start; i < textLength; i++) { + buf.append(text.charAt(i)); + } + return buf.toString(); + } + /** * 
Searches a string for the first occurrence of a character specified by a list of characters. * @param s The string to search. diff --git a/utils/src/main/java/software/amazon/awssdk/utils/UnmodifiableMapOfLists.java b/utils/src/main/java/software/amazon/awssdk/utils/UnmodifiableMapOfLists.java new file mode 100644 index 000000000000..2db7d68417d6 --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/UnmodifiableMapOfLists.java @@ -0,0 +1,442 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils; + +import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * An unmodifiable view of a {@code Map>}. Created using {@link CollectionUtils#unmodifiableMapOfLists(Map)}. 
+ */ +@SdkInternalApi +class UnmodifiableMapOfLists implements Map> { + private final Map> delegate; + + UnmodifiableMapOfLists(Map> delegate) { + this.delegate = delegate; + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return delegate.containsKey(key); + } + + @Override + public boolean containsValue(Object value) { + return delegate.containsValue(value); + } + + @Override + public List get(Object key) { + return delegate.get(key); + } + + @Override + public List getOrDefault(Object key, List defaultValue) { + return unmodifiableList(delegate.getOrDefault(key, defaultValue)); + } + + @Override + public List put(T key, List value) { + throw new UnsupportedOperationException(); + } + + @Override + public List remove(Object key) { + throw new UnsupportedOperationException(); + } + + @Override + public void putAll(Map> m) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public Set keySet() { + return Collections.unmodifiableSet(delegate.keySet()); + } + + @Override + public Collection> values() { + return new UnmodifiableCollection<>(delegate.values()); + } + + @Override + public Set>> entrySet() { + Set>> entries = delegate.entrySet(); + return new UnmodifiableEntrySet<>(entries); + } + + @Override + public void forEach(BiConsumer> action) { + delegate.forEach((k, v) -> action.accept(k, unmodifiableList(v))); + } + + @Override + public void replaceAll(BiFunction, ? 
extends List> function) { + throw new UnsupportedOperationException(); + } + + @Override + public List putIfAbsent(T key, List value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object key, Object value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean replace(T key, List oldValue, List newValue) { + throw new UnsupportedOperationException(); + } + + @Override + public List replace(T key, List value) { + throw new UnsupportedOperationException(); + } + + @Override + public List computeIfAbsent(T key, Function> mappingFunction) { + throw new UnsupportedOperationException(); + } + + @Override + public List computeIfPresent(T key, BiFunction, ? extends List> remappingFunction) { + throw new UnsupportedOperationException(); + } + + @Override + public List compute(T key, BiFunction, ? extends List> remappingFunction) { + throw new UnsupportedOperationException(); + } + + @Override + public List merge(T key, List value, + BiFunction, ? super List, ? 
extends List> remappingFunction) { + throw new UnsupportedOperationException(); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public String toString() { + return delegate.toString(); + } + + private static class UnmodifiableEntrySet implements Set>> { + private final Set>> delegate; + + private UnmodifiableEntrySet(Set>> delegate) { + this.delegate = delegate; + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return delegate.contains(o); + } + + @Override + public Iterator>> iterator() { + return new UnmodifiableEntryIterator<>(delegate.iterator()); + } + + @Override + public void forEach(Consumer>> action) { + delegate.forEach(e -> action.accept(new SimpleImmutableEntry<>(e.getKey(), unmodifiableList(e.getValue())))); + } + + @Override + @SuppressWarnings("unchecked") + public Object[] toArray() { + Object[] result = delegate.toArray(); + for (int i = 0; i < result.length; i++) { + Entry> e = (Entry>) result[i]; + result[i] = new SimpleImmutableEntry<>(e.getKey(), unmodifiableList(e.getValue())); + } + return result; + } + + @Override + @SuppressWarnings("unchecked") + public A[] toArray(A[] a) { + // Technically this could give the caller access very brief access to the modifiable entries from a different thread, + // but that's on them. They had to have done it purposefully with a different thread, and it wouldn't be very + // reliable. 
+ Object[] result = delegate.toArray(a); + for (int i = 0; i < result.length; i++) { + Entry> e = (Entry>) result[i]; + result[i] = new SimpleImmutableEntry<>(e.getKey(), unmodifiableList(e.getValue())); + } + return (A[]) result; + } + + @Override + public boolean add(Entry> tListEntry) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(Collection c) { + return delegate.containsAll(c); + } + + @Override + public boolean addAll(Collection>> c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public String toString() { + return delegate.toString(); + } + } + + private static class UnmodifiableEntryIterator implements Iterator>> { + private final Iterator>> delegate; + + private UnmodifiableEntryIterator(Iterator>> delegate) { + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public Entry> next() { + Entry> next = delegate.next(); + return new SimpleImmutableEntry<>(next.getKey(), unmodifiableList(next.getValue())); + } + } + + private static class UnmodifiableCollection implements Collection> { + private final Collection> delegate; + + private UnmodifiableCollection(Collection> delegate) { + this.delegate = delegate; + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public 
boolean contains(Object o) { + return delegate.contains(o); + } + + @Override + public Iterator> iterator() { + return new UnmodifiableListIterator<>(delegate.iterator()); + } + + @Override + @SuppressWarnings("unchecked") + public Object[] toArray() { + Object[] result = delegate.toArray(); + for (int i = 0; i < result.length; i++) { + result[i] = unmodifiableList((List) result[i]); + } + return result; + } + + @Override + @SuppressWarnings("unchecked") + public A[] toArray(A[] a) { + // Technically this could give the caller access very brief access to the modifiable entries from a different thread, + // but that's on them. They had to have done it purposefully with a different thread, and it wouldn't be very + // reliable. + Object[] result = delegate.toArray(a); + for (int i = 0; i < result.length; i++) { + result[i] = unmodifiableList((List) result[i]); + } + return (A[]) result; + } + + @Override + public boolean add(List us) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(Collection c) { + return delegate.containsAll(c); + } + + @Override + public boolean addAll(Collection> c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public String toString() { + return delegate.toString(); + } + } + + private static class UnmodifiableListIterator implements Iterator> { + private final Iterator> delegate; + + private UnmodifiableListIterator(Iterator> 
delegate) { + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public List next() { + return unmodifiableList(delegate.next()); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return delegate.equals(obj); + } + + @Override + public String toString() { + return delegate.toString(); + } + } + + private static List unmodifiableList(List list) { + if (list == null) { + return null; + } + + return Collections.unmodifiableList(list); + } +} diff --git a/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java index 5263849188f7..b1d2fa7773c9 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/http/SdkHttpUtils.java @@ -21,6 +21,7 @@ import java.net.URI; import java.net.URLDecoder; import java.net.URLEncoder; +import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; @@ -29,8 +30,6 @@ import java.util.Optional; import java.util.Set; import java.util.function.UnaryOperator; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -45,17 +44,13 @@ public final class SdkHttpUtils { private static final String DEFAULT_ENCODING = "UTF-8"; /** - * Regex which matches any of the sequences that we need to fix up after - * URLEncoder.encode(). + * Characters that we need to fix up after URLEncoder.encode(). 
*/ - private static final Pattern ENCODED_CHARACTERS_PATTERN = - Pattern.compile(Pattern.quote("+") + - "|" + - Pattern.quote("*") + - "|" + - Pattern.quote("%7E") + - "|" + - Pattern.quote("%2F")); + private static final String[] ENCODED_CHARACTERS_WITH_SLASHES = new String[] {"+", "*", "%7E", "%2F"}; + private static final String[] ENCODED_CHARACTERS_WITH_SLASHES_REPLACEMENTS = new String[] {"%20", "%2A", "~", "/"}; + + private static final String[] ENCODED_CHARACTERS_WITHOUT_SLASHES = new String[] {"+", "*", "%7E"}; + private static final String[] ENCODED_CHARACTERS_WITHOUT_SLASHES_REPLACEMENTS = new String[] {"%20", "%2A", "~"}; // List of headers that may appear only once in a request; i.e. is not a list of values. // Taken from https://github.com/apache/httpcomponents-client/blob/81c1bc4dc3ca5a3134c5c60e8beff08be2fd8792/httpclient5-cache/src/test/java/org/apache/hc/client5/http/impl/cache/HttpTestUtils.java#L69-L85 with modifications: @@ -163,27 +158,13 @@ private static String urlEncode(String value, boolean ignoreSlashes) { String encoded = invokeSafely(() -> URLEncoder.encode(value, DEFAULT_ENCODING)); - Matcher matcher = ENCODED_CHARACTERS_PATTERN.matcher(encoded); - StringBuffer buffer = new StringBuffer(encoded.length()); - - while (matcher.find()) { - String replacement = matcher.group(0); - - if ("+".equals(replacement)) { - replacement = "%20"; - } else if ("*".equals(replacement)) { - replacement = "%2A"; - } else if ("%7E".equals(replacement)) { - replacement = "~"; - } else if (ignoreSlashes && "%2F".equals(replacement)) { - replacement = "/"; - } - - matcher.appendReplacement(buffer, replacement); + if (!ignoreSlashes) { + return StringUtils.replaceEach(encoded, + ENCODED_CHARACTERS_WITHOUT_SLASHES, + ENCODED_CHARACTERS_WITHOUT_SLASHES_REPLACEMENTS); } - matcher.appendTail(buffer); - return buffer.toString(); + return StringUtils.replaceEach(encoded, ENCODED_CHARACTERS_WITH_SLASHES, ENCODED_CHARACTERS_WITH_SLASHES_REPLACEMENTS); } /** 
@@ -296,6 +277,21 @@ public static Stream allMatchingHeaders(Map> header .flatMap(e -> e.getValue() != null ? e.getValue().stream() : Stream.empty()); } + /** + * Perform a case-insensitive search for a particular header in the provided map of headers. + * + * @param headersToSearch The headers to search. + * @param headersToFind The headers to search for (case insensitively). + * @return A stream providing the values for the headers that matched the requested header. + */ + public static Stream allMatchingHeadersFromCollection(Map> headersToSearch, + Collection headersToFind) { + return headersToSearch.entrySet().stream() + .filter(e -> headersToFind.stream() + .anyMatch(headerToFind -> e.getKey().equalsIgnoreCase(headerToFind))) + .flatMap(e -> e.getValue() != null ? e.getValue().stream() : Stream.empty()); + } + /** * Perform a case-insensitive search for a particular header in the provided map of headers, returning the first matching * header, if one is found. @@ -310,6 +306,19 @@ public static Optional firstMatchingHeader(Map> hea return allMatchingHeaders(headers, header).findFirst(); } + /** + * Perform a case-insensitive search for a set of headers in the provided map of headers, returning the first matching + * header, if one is found. + * + * @param headersToSearch The headers to search. + * @param headersToFind The header to search for (case insensitively). + * @return The first header that matched a requested one, or empty if one was not found. 
+ */ + public static Optional firstMatchingHeaderFromCollection(Map> headersToSearch, + Collection headersToFind) { + return allMatchingHeadersFromCollection(headersToSearch, headersToFind).findFirst(); + } + public static boolean isSingleHeader(String h) { return SINGLE_HEADERS.contains(StringUtils.lowerCase(h)); } diff --git a/utils/src/test/java/software/amazon/awssdk/utils/CollectionUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/CollectionUtilsTest.java index b4fdc06e5110..053d1ebc9908 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/CollectionUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/CollectionUtilsTest.java @@ -15,15 +15,23 @@ package software.amazon.awssdk.utils; +import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; import org.junit.Test; public class CollectionUtilsTest { @@ -69,4 +77,137 @@ public void firstIfPresent_MultipleElementList_ReturnsFirstElement() { public void firstIfPresent_FirstElementNull_ReturnsNull() { assertThat(CollectionUtils.firstIfPresent(Arrays.asList(null, "bar", "baz"))).isNull(); } + + @Test + public void unmodifiableMapOfListsIsUnmodifiable() { + assertUnsupported(m -> m.clear()); + assertUnsupported(m -> m.compute(null, null)); + assertUnsupported(m -> m.computeIfAbsent(null, null)); + assertUnsupported(m -> m.computeIfPresent(null, null)); + assertUnsupported(m -> m.forEach((k, v) -> v.clear())); + assertUnsupported(m -> m.get("foo").clear()); + 
assertUnsupported(m -> m.getOrDefault("", emptyList()).clear()); + assertUnsupported(m -> m.getOrDefault("foo", null).clear()); + assertUnsupported(m -> m.merge(null, null, null)); + assertUnsupported(m -> m.put(null, null)); + assertUnsupported(m -> m.putAll(null)); + assertUnsupported(m -> m.putIfAbsent(null, null)); + assertUnsupported(m -> m.remove(null)); + assertUnsupported(m -> m.remove(null, null)); + assertUnsupported(m -> m.replace(null, null)); + assertUnsupported(m -> m.replace(null, null, null)); + assertUnsupported(m -> m.replaceAll(null)); + assertUnsupported(m -> m.values().clear()); + + assertUnsupported(m -> m.keySet().clear()); + assertUnsupported(m -> m.keySet().add(null)); + assertUnsupported(m -> m.keySet().addAll(null)); + assertUnsupported(m -> m.keySet().remove(null)); + assertUnsupported(m -> m.keySet().removeAll(null)); + assertUnsupported(m -> m.keySet().retainAll(null)); + + assertUnsupported(m -> m.entrySet().clear()); + assertUnsupported(m -> m.entrySet().add(null)); + assertUnsupported(m -> m.entrySet().addAll(null)); + assertUnsupported(m -> m.entrySet().remove(null)); + assertUnsupported(m -> m.entrySet().removeAll(null)); + assertUnsupported(m -> m.entrySet().retainAll(null)); + assertUnsupported(m -> m.entrySet().iterator().next().setValue(emptyList())); + + assertUnsupported(m -> m.values().clear()); + assertUnsupported(m -> m.values().add(null)); + assertUnsupported(m -> m.values().addAll(null)); + assertUnsupported(m -> m.values().remove(null)); + assertUnsupported(m -> m.values().removeAll(null)); + assertUnsupported(m -> m.values().retainAll(null)); + + assertUnsupported(m -> m.values().iterator().next().clear()); + + assertUnsupported(m -> { + Iterator>> i = m.entrySet().iterator(); + i.next(); + i.remove(); + }); + + assertUnsupported(m -> { + Iterator> i = m.values().iterator(); + i.next(); + i.remove(); + }); + + assertUnsupported(m -> { + Iterator i = m.keySet().iterator(); + i.next(); + i.remove(); + }); + } + + @Test 
+ public void unmodifiableMapOfListsIsReadable() { + assertSupported(m -> m.containsKey("foo")); + assertSupported(m -> m.containsValue("foo")); + assertSupported(m -> m.equals(null)); + assertSupported(m -> m.forEach((k, v) -> {})); + assertSupported(m -> m.get("foo")); + assertSupported(m -> m.getOrDefault("foo", null)); + assertSupported(m -> m.hashCode()); + assertSupported(m -> m.isEmpty()); + assertSupported(m -> m.keySet()); + assertSupported(m -> m.size()); + + assertSupported(m -> m.keySet().contains(null)); + assertSupported(m -> m.keySet().containsAll(emptyList())); + assertSupported(m -> m.keySet().equals(null)); + assertSupported(m -> m.keySet().hashCode()); + assertSupported(m -> m.keySet().isEmpty()); + assertSupported(m -> m.keySet().size()); + assertSupported(m -> m.keySet().spliterator()); + assertSupported(m -> m.keySet().toArray()); + assertSupported(m -> m.keySet().toArray(new String[0])); + assertSupported(m -> m.keySet().stream()); + + assertSupported(m -> m.entrySet().contains(null)); + assertSupported(m -> m.entrySet().containsAll(emptyList())); + assertSupported(m -> m.entrySet().equals(null)); + assertSupported(m -> m.entrySet().hashCode()); + assertSupported(m -> m.entrySet().isEmpty()); + assertSupported(m -> m.entrySet().size()); + assertSupported(m -> m.entrySet().spliterator()); + assertSupported(m -> m.entrySet().toArray()); + assertSupported(m -> m.entrySet().toArray(new Map.Entry[0])); + assertSupported(m -> m.entrySet().stream()); + + assertSupported(m -> m.values().contains(null)); + assertSupported(m -> m.values().containsAll(emptyList())); + assertSupported(m -> m.values().equals(null)); + assertSupported(m -> m.values().hashCode()); + assertSupported(m -> m.values().isEmpty()); + assertSupported(m -> m.values().size()); + assertSupported(m -> m.values().spliterator()); + assertSupported(m -> m.values().toArray()); + assertSupported(m -> m.values().toArray(new Collection[0])); + assertSupported(m -> m.values().stream()); + + 
assertSupported(m -> m.entrySet().iterator().next()); + assertSupported(m -> m.entrySet().iterator().hasNext()); + assertSupported(m -> m.values().iterator().next()); + assertSupported(m -> m.values().iterator().hasNext()); + assertSupported(m -> m.keySet().iterator().next()); + assertSupported(m -> m.keySet().iterator().hasNext()); + } + + public void assertUnsupported(Consumer>> mutation) { + Map> map = new HashMap<>(); + map.put("foo", singletonList("bar")); + + assertThatThrownBy(() -> mutation.accept(CollectionUtils.unmodifiableMapOfLists(map))) + .isInstanceOf(UnsupportedOperationException.class); + } + + public void assertSupported(Consumer>> mutation) { + Map> map = new HashMap<>(); + map.put("foo", singletonList("bar")); + + mutation.accept(map); + } } diff --git a/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java index 99accd9ac2b4..0d885deb410e 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/SdkHttpUtilsTest.java @@ -148,4 +148,29 @@ public void headerRetrievalWorksCorrectly() { assertThat(SdkHttpUtils.firstMatchingHeader(headers, null)).isNotPresent(); assertThat(SdkHttpUtils.firstMatchingHeader(headers, "nothing")).isNotPresent(); } + + @Test + public void headersFromCollectionWorksCorrectly() { + Map> headers = new HashMap<>(); + headers.put("FOO", asList("bar", "baz")); + headers.put("foo", singletonList(null)); + headers.put("other", singletonList("foo")); + headers.put("Foo", singletonList("baz2")); + + assertThat(SdkHttpUtils.allMatchingHeadersFromCollection(headers, asList("nothing"))).isEmpty(); + assertThat(SdkHttpUtils.allMatchingHeadersFromCollection(headers, asList("foo"))) + .containsExactlyInAnyOrder("bar", "baz", null, "baz2"); + assertThat(SdkHttpUtils.allMatchingHeadersFromCollection(headers, asList("nothing", "foo"))) + .containsExactlyInAnyOrder("bar", 
"baz", null, "baz2"); + assertThat(SdkHttpUtils.allMatchingHeadersFromCollection(headers, asList("foo", "nothing"))) + .containsExactlyInAnyOrder("bar", "baz", null, "baz2"); + assertThat(SdkHttpUtils.allMatchingHeadersFromCollection(headers, asList("foo", "other"))) + .containsExactlyInAnyOrder("bar", "baz", null, "foo", "baz2"); + + assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("nothing"))).isEmpty(); + assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("foo"))).hasValue("bar"); + assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("nothing", "foo"))).hasValue("bar"); + assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("foo", "nothing"))).hasValue("bar"); + assertThat(SdkHttpUtils.firstMatchingHeaderFromCollection(headers, asList("foo", "other"))).hasValue("foo"); + } }